diff --git a/dash-spv-ffi/FFI_API.md b/dash-spv-ffi/FFI_API.md index 38a8e0ea2..5873e39db 100644 --- a/dash-spv-ffi/FFI_API.md +++ b/dash-spv-ffi/FFI_API.md @@ -999,7 +999,6 @@ Release a wallet manager obtained from `dash_spv_ffi_client_get_wallet_manager`. - `FFINetwork` - Network type (Dash, Testnet, Regtest, Devnet) - `FFIValidationMode` - Validation mode (None, Basic, Full) - `FFIMempoolStrategy` - Mempool strategy (FetchAll, BloomFilter, Selective) -- `FFISyncStage` - Synchronization stage ## Memory Management diff --git a/dash-spv-ffi/scripts/generate_ffi_docs.py b/dash-spv-ffi/scripts/generate_ffi_docs.py index 635fec438..da1ea462c 100755 --- a/dash-spv-ffi/scripts/generate_ffi_docs.py +++ b/dash-spv-ffi/scripts/generate_ffi_docs.py @@ -279,7 +279,6 @@ def generate_markdown(functions: List[FFIFunction]) -> str: md.append("- `FFINetwork` - Network type (Dash, Testnet, Regtest, Devnet)") md.append("- `FFIValidationMode` - Validation mode (None, Basic, Full)") md.append("- `FFIMempoolStrategy` - Mempool strategy (FetchAll, BloomFilter, Selective)") - md.append("- `FFISyncStage` - Synchronization stage") md.append("") # Memory Management diff --git a/dash-spv-ffi/src/client.rs b/dash-spv-ffi/src/client.rs index 4ea599898..2a941a9aa 100644 --- a/dash-spv-ffi/src/client.rs +++ b/dash-spv-ffi/src/client.rs @@ -13,7 +13,6 @@ use dash_spv::Hash; use futures::future::{AbortHandle, Abortable}; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; -use std::time::Duration; use tokio::runtime::Handle; use tokio::runtime::Runtime; use tokio::sync::{broadcast, watch}; @@ -355,70 +354,6 @@ pub unsafe extern "C" fn dash_spv_ffi_client_stop(client: *mut FFIDashSpvClient) } } -pub fn client_test_sync(client: &FFIDashSpvClient) -> i32 { - let result = client.runtime.block_on(async { - let spv_client = { - let mut guard = client.inner.lock().unwrap(); - match guard.take() { - Some(client) => client, - None => { - return Err(dash_spv::SpvError::Config("Client not initialized".to_string())) - } - } - }; - tracing::info!("Starting test sync..."); - - // Get initial height - let progress = spv_client.sync_progress(); - let start_height = match progress.headers() { - Ok(progress) => progress.current_height(), - Err(e) => { - tracing::error!("Failed to get initial height: {}", e); - return Err(e.into()); - } - }; - tracing::info!("Initial height: {}", start_height); - - // Wait a bit for headers to download - tokio::time::sleep(Duration::from_secs(10)).await; - - // Check if headers increased - let progress = spv_client.sync_progress(); - let end_height = match progress.headers() { - Ok(progress) => progress.current_height(), - Err(e) => { - tracing::error!("Failed to get final height: {}", e); - let mut guard = client.inner.lock().unwrap(); - *guard = Some(spv_client); - return Err(e.into()); - } - }; - tracing::info!("Final height: {}", end_height); - - let result = if end_height > start_height { - tracing::info!("✅ Sync working! Downloaded {} headers", end_height - start_height); - Ok(()) - } else { - let msg = "No headers downloaded".to_string(); - tracing::error!("❌ {}", msg); - Err(dash_spv::SpvError::Sync(dash_spv::SyncError::Network(msg))) - }; - - // put client back - let mut guard = client.inner.lock().unwrap(); - *guard = Some(spv_client); - result - }); - - match result { - Ok(_) => FFIErrorCode::Success as i32, - Err(e) => { - set_last_error(&e.to_string()); - FFIErrorCode::from(e) as i32 - } - } -} - /// Start the SPV client and begin syncing in the background. 
 ///
 /// This is the streamlined entry point that combines `start()` and continuous monitoring
diff --git a/dash-spv-ffi/src/types.rs b/dash-spv-ffi/src/types.rs
index b695d5db8..ecb921c27 100644
--- a/dash-spv-ffi/src/types.rs
+++ b/dash-spv-ffi/src/types.rs
@@ -3,8 +3,7 @@ use dash_spv::sync::{
     BlockHeadersProgress, BlocksProgress, ChainLockProgress, FilterHeadersProgress,
     FiltersProgress, InstantSendProgress, MasternodesProgress, SyncProgress, SyncState,
 };
-use dash_spv::types::{DetailedSyncProgress, MempoolRemovalReason, SyncStage};
-use dash_spv::SyncProgress as LegacySyncProgress;
+use dash_spv::types::MempoolRemovalReason;
 use std::ffi::{CStr, CString};
 use std::os::raw::c_char;
 
@@ -46,75 +45,6 @@ impl FFIString {
     }
 }
 
-#[repr(C)]
-pub struct FFILegacySyncProgress {
-    pub header_height: u32,
-    pub filter_header_height: u32,
-    pub masternode_height: u32,
-    pub peer_count: u32,
-    pub filter_sync_available: bool,
-    pub filters_downloaded: u32,
-    pub last_synced_filter_height: u32,
-}
-
-impl From<LegacySyncProgress> for FFILegacySyncProgress {
-    fn from(progress: LegacySyncProgress) -> Self {
-        FFILegacySyncProgress {
-            header_height: progress.header_height,
-            filter_header_height: progress.filter_header_height,
-            masternode_height: progress.masternode_height,
-            peer_count: progress.peer_count,
-            filter_sync_available: progress.filter_sync_available,
-            filters_downloaded: progress.filters_downloaded as u32,
-            last_synced_filter_height: progress.last_synced_filter_height.unwrap_or(0),
-        }
-    }
-}
-
-#[repr(C)]
-#[derive(Debug, Clone, Copy)]
-pub enum FFISyncStage {
-    Connecting = 0,
-    QueryingHeight = 1,
-    Downloading = 2,
-    Validating = 3,
-    Storing = 4,
-    DownloadingFilterHeaders = 5,
-    DownloadingFilters = 6,
-    DownloadingBlocks = 7,
-    Complete = 8,
-    Failed = 9,
-}
-
-impl From<SyncStage> for FFISyncStage {
-    fn from(stage: SyncStage) -> Self {
-        match stage {
-            SyncStage::Connecting => FFISyncStage::Connecting,
-            SyncStage::QueryingPeerHeight => FFISyncStage::QueryingHeight,
-            SyncStage::DownloadingHeaders {
-                ..
-            } => FFISyncStage::Downloading,
-            SyncStage::ValidatingHeaders {
-                ..
-            } => FFISyncStage::Validating,
-            SyncStage::StoringHeaders {
-                ..
-            } => FFISyncStage::Storing,
-            SyncStage::DownloadingFilterHeaders {
-                ..
-            } => FFISyncStage::DownloadingFilterHeaders,
-            SyncStage::DownloadingFilters {
-                ..
-            } => FFISyncStage::DownloadingFilters,
-            SyncStage::DownloadingBlocks {
-                ..
-            } => FFISyncStage::DownloadingBlocks,
-            SyncStage::Complete => FFISyncStage::Complete,
-            SyncStage::Failed(_) => FFISyncStage::Failed,
-        }
-    }
-}
-
 /// SyncState exposed by the FFI as FFISyncState.
 #[repr(C)]
 #[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
@@ -404,74 +334,6 @@ impl From<SyncProgress> for FFISyncProgress {
     }
 }
 
-#[repr(C)]
-pub struct FFIDetailedSyncProgress {
-    pub total_height: u32,
-    pub percentage: f64,
-    pub headers_per_second: f64,
-    pub estimated_seconds_remaining: i64, // -1 if unknown
-    pub stage: FFISyncStage,
-    pub stage_message: FFIString,
-    pub overview: FFILegacySyncProgress,
-    pub total_headers: u64,
-    pub sync_start_timestamp: i64,
-}
-
-impl From<DetailedSyncProgress> for FFIDetailedSyncProgress {
-    fn from(progress: DetailedSyncProgress) -> Self {
-        use std::time::UNIX_EPOCH;
-
-        let stage_message = match &progress.sync_stage {
-            SyncStage::Connecting => "Connecting to peers".to_string(),
-            SyncStage::QueryingPeerHeight => "Querying blockchain height".to_string(),
-            SyncStage::DownloadingHeaders {
-                start,
-                end,
-            } => format!("Downloading headers {} to {}", start, end),
-            SyncStage::ValidatingHeaders {
-                batch_size,
-            } => format!("Validating {} headers", batch_size),
-            SyncStage::StoringHeaders {
-                batch_size,
-            } => format!("Storing {} headers", batch_size),
-            SyncStage::DownloadingFilterHeaders {
-                current,
-                target,
-            } => format!("Downloading filter headers {} / {}", current, target),
-            SyncStage::DownloadingFilters {
-                completed,
-                total,
-            } => format!("Downloading filters {} / {}", completed, total),
-            SyncStage::DownloadingBlocks {
-                pending,
-            } => format!("Downloading blocks ({} pending)", pending),
-            SyncStage::Complete => "Synchronization complete".to_string(),
-            SyncStage::Failed(err) => err.clone(),
-        };
-
-        let overview = FFILegacySyncProgress::from(progress.sync_progress.clone());
-
-        FFIDetailedSyncProgress {
-            total_height: progress.peer_best_height,
-            percentage: progress.percentage,
-            headers_per_second: progress.headers_per_second,
-            estimated_seconds_remaining: progress
-                .estimated_time_remaining
-                .map(|d| d.as_secs() as i64)
-                .unwrap_or(-1),
-            stage: progress.sync_stage.into(),
-            stage_message: FFIString::new(&stage_message),
-            overview,
-            total_headers: progress.total_headers_processed,
-            sync_start_timestamp: progress
-                .sync_start_time
-                .duration_since(UNIX_EPOCH)
-                .unwrap_or(std::time::Duration::from_secs(0))
-                .as_secs() as i64,
-        }
-    }
-}
-
 /// # Safety
 /// - `s.ptr` must be a pointer previously returned by `FFIString::new` or compatible.
 /// - It must not be used after this call.
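[Note: with `FFILegacySyncProgress`, `FFISyncStage`, and `FFIDetailedSyncProgress` gone, the component-based `FFISyncProgress` conversion kept in the hunk context above is the remaining progress path across the FFI boundary. A minimal sketch of that conversion, modeled on `test_sync_progress_conversion` in tests/test_types.rs below; the `Default` construction is taken from that test, and the `main` wrapper is illustrative only:]

```rust
use dash_spv::sync::SyncProgress;
use dash_spv_ffi::FFISyncProgress;

fn main() {
    // Build the new component-based progress and hand it across the FFI
    // boundary via the retained `impl From<SyncProgress> for FFISyncProgress`.
    let progress = SyncProgress::default();
    let ffi_progress = FFISyncProgress::from(progress);
    let _ = ffi_progress;
}
```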
diff --git a/dash-spv-ffi/tests/test_client.rs b/dash-spv-ffi/tests/test_client.rs
index afbe0747b..f5b2e49d6 100644
--- a/dash-spv-ffi/tests/test_client.rs
+++ b/dash-spv-ffi/tests/test_client.rs
@@ -4,7 +4,6 @@ mod tests {
     use key_wallet_ffi::FFINetwork;
     use serial_test::serial;
     use std::ffi::CString;
-    use std::os::raw::c_void;
    use std::sync::{Arc, Mutex};
     use tempfile::TempDir;
 
@@ -14,25 +13,6 @@ mod tests {
         last_progress: Arc<Mutex<f64>>,
     }
 
-    extern "C" fn _test_progress_callback(
-        progress: f64,
-        _message: *const std::os::raw::c_char,
-        user_data: *mut c_void,
-    ) {
-        let data = unsafe { &*(user_data as *const _TestCallbackData) };
-        *data.progress_called.lock().unwrap() = true;
-        *data.last_progress.lock().unwrap() = progress;
-    }
-
-    extern "C" fn _test_completion_callback(
-        _success: bool,
-        _error: *const std::os::raw::c_char,
-        user_data: *mut c_void,
-    ) {
-        let data = unsafe { &*(user_data as *const _TestCallbackData) };
-        *data.completion_called.lock().unwrap() = true;
-    }
-
     fn create_test_config() -> (*mut FFIClientConfig, TempDir) {
         let temp_dir = TempDir::new().unwrap();
         let config = dash_spv_ffi_config_new(FFINetwork::Regtest);
@@ -98,91 +78,4 @@ mod tests {
             assert!(progress.is_null());
         }
     }
-
-    #[test]
-    #[serial]
-    fn test_sync_progress() {
-        unsafe {
-            let (config, _temp_dir) = create_test_config();
-            let client = dash_spv_ffi_client_new(config);
-
-            let progress = dash_spv_ffi_client_get_sync_progress(client);
-            if !progress.is_null() {
-                let _progress_ref = &*progress;
-                // header_height and filter_header_height are u32, always >= 0
-                dash_spv_ffi_sync_progress_destroy(progress);
-            }
-
-            dash_spv_ffi_client_destroy(client);
-            dash_spv_ffi_config_destroy(config);
-        }
-    }
-
-    #[test]
-    #[serial]
-    fn test_client_stats() {
-        unsafe {
-            let (config, _temp_dir) = create_test_config();
-            let client = dash_spv_ffi_client_new(config);
-
-            dash_spv_ffi_client_destroy(client);
-            dash_spv_ffi_config_destroy(config);
-        }
-    }
-
-    #[test]
-    #[serial]
-    #[ignore]
-    fn test_sync_diagnostic() {
-        unsafe {
-            // Allow running this test only when explicitly enabled
-            if std::env::var("RUST_DASH_FFI_RUN_NETWORK_TESTS").unwrap_or_default() != "1" {
-                println!(
-                    "Skipping test_sync_diagnostic (set RUST_DASH_FFI_RUN_NETWORK_TESTS=1 to run)"
-                );
-                return;
-            }
-
-            // Create testnet config for the diagnostic test
-            let config = dash_spv_ffi_config_testnet();
-            let temp_dir = TempDir::new().unwrap();
-            let path = CString::new(temp_dir.path().to_str().unwrap()).unwrap();
-            dash_spv_ffi_config_set_data_dir(config, path.as_ptr());
-
-            // Create client
-            let client = dash_spv_ffi_client_new(config);
-            assert!(!client.is_null(), "Failed to create client");
-
-            // Start the client
-            let start_result = dash_spv_ffi_client_start(client);
-            if start_result != FFIErrorCode::Success as i32 {
-                println!("Warning: Failed to start client, error code: {}", start_result);
-                let error = dash_spv_ffi_get_last_error();
-                if !error.is_null() {
-                    let error_str = std::ffi::CStr::from_ptr(error);
-                    println!("Error message: {:?}", error_str);
-                }
-            }
-
-            // Run the diagnostic sync test
-            println!("Running sync diagnostic test...");
-            let test_result = client_test_sync(&*client);
-
-            if test_result == FFIErrorCode::Success as i32 {
-                println!("✅ Sync test passed!");
-            } else {
-                println!("❌ Sync test failed with error code: {}", test_result);
-                let error = dash_spv_ffi_get_last_error();
-                if !error.is_null() {
-                    let error_str = std::ffi::CStr::from_ptr(error);
-                    println!("Error message: {:?}", error_str);
-                }
-            }
-
-            // Stop and cleanup
-            let _stop_result = dash_spv_ffi_client_stop(client);
-            dash_spv_ffi_client_destroy(client);
-            dash_spv_ffi_config_destroy(config);
-        }
-    }
 }
diff --git a/dash-spv-ffi/tests/test_types.rs b/dash-spv-ffi/tests/test_types.rs
index efac13ecc..c17e4beb8 100644
--- a/dash-spv-ffi/tests/test_types.rs
+++ b/dash-spv-ffi/tests/test_types.rs
@@ -4,7 +4,6 @@ mod tests {
         BlockHeadersProgress, BlocksProgress, ChainLockProgress, FilterHeadersProgress,
         FiltersProgress, InstantSendProgress, MasternodesProgress, SyncProgress, SyncState,
     };
-    use dash_spv::SyncProgress as LegacySyncProgress;
    use dash_spv_ffi::*;
     use key_wallet_ffi::FFINetwork;
 
@@ -44,30 +43,6 @@ mod tests {
         assert_eq!(FFINetwork::Devnet, dashcore::Network::Devnet.into());
     }
 
-    #[test]
-    fn test_legacy_sync_progress_conversion() {
-        let progress = LegacySyncProgress {
-            header_height: 100,
-            filter_header_height: 90,
-            masternode_height: 80,
-            peer_count: 5,
-            filter_sync_available: true,
-            filters_downloaded: 50,
-            last_synced_filter_height: Some(45),
-            sync_start: std::time::SystemTime::now(),
-            last_update: std::time::SystemTime::now(),
-        };
-
-        let ffi_progress = FFILegacySyncProgress::from(progress);
-
-        assert_eq!(ffi_progress.header_height, 100);
-        assert_eq!(ffi_progress.filter_header_height, 90);
-        assert_eq!(ffi_progress.masternode_height, 80);
-        assert_eq!(ffi_progress.peer_count, 5);
-        assert_eq!(ffi_progress.filters_downloaded, 50);
-        assert_eq!(ffi_progress.last_synced_filter_height, 45);
-    }
-
     #[test]
     fn test_sync_progress_conversion() {
         let mut progress = SyncProgress::default();
diff --git a/dash-spv-ffi/tests/unit/test_async_operations.rs b/dash-spv-ffi/tests/unit/test_async_operations.rs
index b8ef6be8f..14c974ba6 100644
--- a/dash-spv-ffi/tests/unit/test_async_operations.rs
+++ b/dash-spv-ffi/tests/unit/test_async_operations.rs
@@ -27,114 +27,6 @@ mod tests {
         }
     }
 
-    #[test]
-    #[serial]
-    #[ignore] // Disabled due to unreliable behavior in test environments
-    fn test_callback_reentrancy() {
-        unsafe {
-            let (client, config, _temp_dir) = create_test_client();
-            assert!(!client.is_null());
-
-            // Test data for tracking reentrancy behavior
-            let reentrancy_count = Arc::new(AtomicU32::new(0));
-            let reentrancy_detected = Arc::new(AtomicBool::new(false));
-            let callback_active = Arc::new(AtomicBool::new(false));
-            let deadlock_detected = Arc::new(AtomicBool::new(false));
-
-            struct ReentrantData {
-                count: Arc<AtomicU32>,
-                reentrancy_detected: Arc<AtomicBool>,
-                callback_active: Arc<AtomicBool>,
-                deadlock_detected: Arc<AtomicBool>,
-                client: *mut FFIDashSpvClient,
-            }
-
-            let reentrant_data = ReentrantData {
-                count: reentrancy_count.clone(),
-                reentrancy_detected: reentrancy_detected.clone(),
-                callback_active: callback_active.clone(),
-                deadlock_detected: deadlock_detected.clone(),
-                client,
-            };
-
-            unsafe extern "C" fn reentrant_callback(
-                _success: bool,
-                _error: *const c_char,
-                user_data: *mut c_void,
-            ) {
-                let data = unsafe { &*(user_data as *const ReentrantData) };
-                let count = data.count.fetch_add(1, Ordering::SeqCst);
-
-                // Check if callback is already active (reentrancy detection)
-                if data.callback_active.swap(true, Ordering::SeqCst) {
-                    data.reentrancy_detected.store(true, Ordering::SeqCst);
-                    println!("Reentrancy detected!
Count: {}", count); - return; - } - - println!("Callback invoked, count: {}", count); - - // Test 1: Try to make a reentrant call (should be safely handled) - if count == 0 { - // Attempt to start another sync operation from within callback - // This tests that the FFI layer properly handles reentrancy - let start_time = Instant::now(); - - // Try to call test_sync which is a simpler operation - let test_result = client_test_sync(&*data.client); - let elapsed = start_time.elapsed(); - - // If this takes too long, it might indicate a deadlock - if elapsed > Duration::from_secs(1) { - data.deadlock_detected.store(true, Ordering::SeqCst); - } - - if test_result != 0 { - println!("Reentrant call failed with error code: {}", test_result); - } - } - - // Mark callback as no longer active - data.callback_active.store(false, Ordering::SeqCst); - } - - // Test with actual async operation - println!("Testing callback reentrancy safety with actual FFI operations"); - - // First, start the client to enable operations - let start_result = dash_spv_ffi_client_start(client); - assert_eq!(start_result, 0); - - // Give client time to initialize - thread::sleep(Duration::from_millis(100)); - - // Now test reentrancy by invoking callback directly and through FFI - reentrant_callback(true, std::ptr::null(), &reentrant_data as *const _ as *mut c_void); - - // Wait for operations to complete - thread::sleep(Duration::from_millis(500)); - - // Verify results - let final_count = reentrancy_count.load(Ordering::SeqCst); - let reentrancy_occurred = reentrancy_detected.load(Ordering::SeqCst); - let deadlock_occurred = deadlock_detected.load(Ordering::SeqCst); - - println!("Final callback count: {}", final_count); - println!("Reentrancy detected: {}", reentrancy_occurred); - println!("Deadlock detected: {}", deadlock_occurred); - - // Assertions - relaxed for test environment - // Note: Complex async operations may not trigger callbacks consistently in test environments - assert!(!deadlock_occurred, "No deadlock should occur during reentrancy"); - println!("Callback count: {} (may be 0 in test environment)", final_count); - - // Clean up - dash_spv_ffi_client_stop(client); - dash_spv_ffi_client_destroy(client); - dash_spv_ffi_config_destroy(config); - } - } - #[test] #[serial] #[ignore] // Disabled due to unreliable behavior in test environments diff --git a/dash-spv-ffi/tests/unit/test_type_conversions.rs b/dash-spv-ffi/tests/unit/test_type_conversions.rs index 12e655215..da6930636 100644 --- a/dash-spv-ffi/tests/unit/test_type_conversions.rs +++ b/dash-spv-ffi/tests/unit/test_type_conversions.rs @@ -76,29 +76,6 @@ mod tests { } } - #[test] - fn test_sync_progress_extreme_values() { - let progress = dash_spv::SyncProgress { - header_height: u32::MAX, - filter_header_height: u32::MAX, - masternode_height: u32::MAX, - peer_count: u32::MAX, - filter_sync_available: true, - filters_downloaded: u64::MAX, - last_synced_filter_height: Some(u32::MAX), - sync_start: std::time::SystemTime::now(), - last_update: std::time::SystemTime::now(), - }; - - let ffi_progress = FFILegacySyncProgress::from(progress); - assert_eq!(ffi_progress.header_height, u32::MAX); - assert_eq!(ffi_progress.filter_header_height, u32::MAX); - assert_eq!(ffi_progress.masternode_height, u32::MAX); - assert_eq!(ffi_progress.peer_count, u32::MAX); - assert_eq!(ffi_progress.filters_downloaded, u32::MAX); // Note: truncated from u64 - assert_eq!(ffi_progress.last_synced_filter_height, u32::MAX); - } - #[test] fn test_concurrent_ffi_string_creation() { use 
std::sync::atomic::{AtomicUsize, Ordering}; diff --git a/dash-spv/ARCHITECTURE.md b/dash-spv/ARCHITECTURE.md index 820a2a6e9..1e84a2ca3 100644 --- a/dash-spv/ARCHITECTURE.md +++ b/dash-spv/ARCHITECTURE.md @@ -7,10 +7,6 @@ 1. [Executive Summary](#executive-summary) 2. [Architecture Overview](#architecture-overview) 3. [Module Analysis](#module-analysis) -4. [Critical Assessment](#critical-assessment) -5. [Recommendations](#recommendations) -6. [Complexity Metrics](#complexity-metrics) -7. [Security Considerations](#security-considerations) --- @@ -231,17 +227,6 @@ - **GOOD**: Checkpoint sync support - **BAD**: No documentation on thread-safety assumptions -4. **`DetailedSyncProgress`** (lines 138-213) - - Performance metrics and ETA calculation - - **GOOD**: Useful for UX - - **ISSUE**: Tight coupling to specific sync stages - -5. **Custom serde for `AddressBalance`** (lines 707-804) - - **WHY**: dashcore::Amount doesn't derive Serialize - - **COMPLEXITY**: Manual Visitor pattern - - **JUSTIFIED**: Necessary for persistence - - **ISSUE**: Verbose - consider upstream fix - **Analysis**: - **GOOD**: Comprehensive type coverage - **ISSUE**: File is becoming a dumping ground (1,065 lines) @@ -251,8 +236,7 @@ **Refactoring needed**: - ⚠️ **HIGH PRIORITY**: Split into multiple files: - `types/chain.rs` - ChainState, CachedHeader - - `types/sync.rs` - SyncProgress, SyncStage, DetailedSyncProgress - - `types/events.rs` - SpvEvent, MempoolRemovalReason + - `types/events.rs` - MempoolRemovalReason - `types/stats.rs` - SpvStats, PeerInfo - `types/balances.rs` - AddressBalance, MempoolBalance, UnconfirmedTransaction - ⚠️ **MEDIUM**: Add documentation on thread-safety for ChainState @@ -1128,10 +1112,6 @@ sync// - Validates signatures - Emits `InstantLockReceived` events -#### Legacy Module - -The previous sequential sync implementation is preserved in `src/sync/legacy/` for reference. This approach used phase-based sequential synchronization where each phase completed before the next began. - #### Design Strengths - **True parallelism**: Headers, filters, and masternodes sync concurrently @@ -1143,280 +1123,6 @@ The previous sequential sync implementation is preserved in `src/sync/legacy/` f --- -### 8. VALIDATION MODULE (6 files, ~2,000 lines) - -#### Overview -Validation module handles header validation, ChainLock verification, and InstantLock verification. - -#### `src/validation/mod.rs` (264 lines) ✅ GOOD - -**Purpose**: ValidationManager orchestration. - -**What it does**: -- Coordinates header validation -- Coordinates ChainLock validation -- Coordinates InstantLock validation -- Configurable validation modes - -**Analysis**: -- **GOOD**: Clean orchestration -- **GOOD**: Mode-based validation -- **EXCELLENT**: Well-tested - -**Refactoring needed**: ❌ None - -#### `src/validation/headers.rs` (418 lines) ✅ GOOD - -**Purpose**: Header chain validation. - -**What it does**: -- Validates PoW -- Validates timestamps -- Validates difficulty transitions -- Validates block linking - -**Analysis**: -- **GOOD**: Correct validation rules -- **GOOD**: Proper Dash-specific rules -- **EXCELLENT**: Comprehensive tests (headers_test.rs, headers_edge_test.rs) - -**Refactoring needed**: ❌ None - well-crafted - -#### `src/validation/quorum.rs` (248 lines) ✅ GOOD - -**Purpose**: Quorum validation for ChainLocks and InstantLocks. 
- -**What it does**: -- Validates quorum membership -- Validates BLS signatures -- Tracks active quorums - -**Analysis**: -- **GOOD**: Dash-specific functionality -- **ISSUE**: TODO comments indicate incomplete implementation - -**Refactoring needed**: -- ⚠️ **HIGH**: Complete TODO items for signature validation - -#### `src/validation/instantlock.rs` (87 lines) ⚠️ INCOMPLETE - -**Purpose**: InstantLock validation. - -**Analysis**: -- **ISSUE**: Contains TODO for actual signature validation -- **CRITICAL**: Validation is stubbed out - -**Refactoring needed**: -- 🚨 **CRITICAL**: Implement actual InstantLock signature validation - -**Overall Validation Module Assessment**: -- ✅ **GOOD**: Header validation is solid -- 🚨 **CRITICAL**: BLS signature validation incomplete (security risk) -- ✅ **EXCELLENT**: Test coverage for headers -- ⚠️ **HIGH PRIORITY**: Complete Dash-specific validation features - ---- - -### 9. MEMPOOL_FILTER.RS (793 lines) ✅ GOOD - -**Purpose**: Filters mempool transactions based on wallet addresses. - -**What it does**: -- Receives mempool transactions -- Checks against watched addresses -- Emits events for relevant txns -- Manages mempool state - -**Complex Types Used**: -- `Arc>` - **JUSTIFIED**: Shared between sync and mempool tasks - -**Analysis**: -- **GOOD**: Clean implementation -- **GOOD**: Proper async handling -- **GOOD**: Event emission - -**Refactoring needed**: -- ✅ **LOW**: Could extract to mempool/ module directory - ---- - -### 10. TERMINAL.RS (223 lines) ✅ EXCELLENT - -**Purpose**: Terminal UI for CLI binary (optional feature). - -**What it does**: -- Renders status bar -- Updates sync progress -- Displays peer count - -**Analysis**: -- **EXCELLENT**: Properly feature-gated -- **EXCELLENT**: Clean implementation -- **GOOD**: Uses crossterm effectively - -**Refactoring needed**: ❌ None - this is well-done - ---- - -## Critical Assessment - -### 🏆 STRENGTHS - -1. **Excellent Architecture Principles** - - Trait-based abstraction (NetworkManager, StorageManager) - - Dependency injection enables testing - - Clear module boundaries - -2. **Comprehensive Functionality** - - Full SPV implementation - - Dash-specific features (ChainLocks, InstantLocks, Masternodes) - - BIP157 compact filters - - Robust reorg handling - -3. **Good Testing Culture** - - Mock network implementation - - Comprehensive header validation tests - - Unit tests for critical components - -4. **Modern Rust** - - Async/await throughout - - Proper error handling with thiserror - - Good use of type system - -5. **Performance Optimizations** - - CachedHeader for X11 hash caching - - Segmented storage for efficient I/O - - Bloom filters for transaction filtering - -### 🚨 CRITICAL PROBLEMS - -1. **INCOMPLETE SECURITY FEATURES** 🔥🔥 - - ChainLock signature validation stubbed (chainlock_manager.rs:127) - - InstantLock signature validation incomplete - - **SECURITY RISK**: Could accept invalid ChainLocks/InstantLocks - - **PRIORITY**: Must be completed before mainnet production use - - **EFFORT**: 1-2 weeks - -### ⚠️ AREAS FOR IMPROVEMENT - -1. **Testing Coverage** - - Network layer could use more integration tests - - End-to-end sync cycle testing would increase confidence - - Property-based testing could validate invariants - -2. **Resource Management** - - Connection limits not enforced - - No bandwidth throttling - - Peer ban list not persisted across restarts - -3. 
**Code Duplication** - - Some overlap between headers.rs and headers_with_reorg.rs - - Validation logic could be further consolidated - -5. **Error Recovery** - - Retry strategies could be more consistent - - Some edge cases may lack retry logic - -### ✅ MINOR ISSUES - -1. **Dead Code** - - bloom/stats.rs not used - - Some deprecated error variants still present - -2. **Hardcoded Values** - - Checkpoints in code rather than data file - - Timeout values not configurable - -3. **Missing Features** - - No compression in storage - - No checksums for corruption detection - - Peer ban list not persisted - ---- - -## Recommendations - -### 🚨 CRITICAL PRIORITY (Do First) - -1. **Implement BLS Signature Validation** - - **Why**: Security vulnerability - could accept invalid ChainLocks/InstantLocks - - **Impact**: 🔥🔥🔥 CRITICAL SECURITY - - **Effort**: 1-2 weeks (requires BLS library integration) - - **Benefit**: Production-ready security for mainnet - -### ⚠️ HIGH PRIORITY (Do Soon) - -2. **Add Comprehensive Integration Tests** - - **Why**: Increase confidence in network layer and sync pipeline - - **Impact**: 🔥🔥 HIGH - - **Effort**: 1 week - - **Benefit**: Catch regressions, validate end-to-end behavior - -3. **Document Lock Ordering More Prominently** - - **Why**: Prevent deadlocks - - **Impact**: 🔥🔥 HIGH (correctness) - - **Effort**: 1 day - - **Benefit**: Correctness, debugging - -7. **Add Comprehensive Integration Tests** - - **Why**: Network layer undertested - - **Impact**: 🔥🔥 HIGH - - **Effort**: 1 week - - **Benefit**: Confidence, regression prevention - -### ✅ MEDIUM PRIORITY (Plan For) - -8. **Extract Checkpoint Data to Config File** - - **Impact**: 🔥 MEDIUM - - **Effort**: 1 day - -9. **Add Resource Limits** - - Connection limits - - Bandwidth throttling - - Memory limits - - **Impact**: 🔥 MEDIUM (DoS protection) - - **Effort**: 3-4 days - -10. **Improve Error Recovery** - - Consolidate retry logic - - Consistent backoff strategies - - **Impact**: 🔥 MEDIUM - - **Effort**: 1 week - -11. **Add Property-Based Tests** - - Use proptest for filter properties - - Test reorg handling - - **Impact**: 🔥 MEDIUM - - **Effort**: 1 week - -### ✅ LOW PRIORITY (Nice to Have) - -12. **Type Aliases for Common Configurations** (Ergonomics Only) - - Generic design is intentional and excellent for library flexibility - - Type aliases just provide convenience without losing flexibility - ```rust - type StandardSpvClient = DashSpvClient< - WalletManager, - PeerNetworkManager, - DiskStorageManager - >; - ``` - -13. **Consider Embedded DB for Storage** - - RocksDB or Sled - - Better concurrency - - Compression built-in - -14. **Add Compression to Storage** - - Filters compress well - - Save disk space - -15. 
**Persist Peer Ban List** - - Survives restarts - ---- - -## Complexity Metrics ### Sync Module Structure @@ -1429,162 +1135,3 @@ Validation module handles header validation, ChainLock verification, and Instant | MasternodesManager | sync/masternodes/ | manager.rs, pipeline.rs, sync_manager.rs | Masternode list via QRInfo/MnListDiff | | ChainLockManager | sync/chainlock/ | manager.rs, sync_manager.rs | ChainLock message handling | | InstantSendManager | sync/instantsend/ | manager.rs, sync_manager.rs | InstantLock message handling | - -### File Complexity (Largest Files) - -| File | Lines | Complexity | Notes | -|------|-------|------------|-------| -| sync/ (total) | 60+ files | ✅ EXCELLENT | 7 parallel managers with consistent structure | -| client/ | 8 modules | ✅ EXCELLENT | Client functionality modules | -| storage/disk/ | 7 modules | ✅ EXCELLENT | Persistent storage modules | -| network/manager.rs | ~1,300 | ✅ ACCEPTABLE | Complex peer management logic | -| types.rs | ~1,065 | ✅ ACCEPTABLE | Core type definitions | - -### Module Health - -| Module | Files | Health | Characteristics | -|--------|-------|--------|-----------------| -| sync/ | 60+ | ✅ EXCELLENT | Parallel managers, SyncManager trait, event-driven | -| client/ | 8 | ✅ EXCELLENT | Clean separation: lifecycle, sync, progress, mempool, events | -| storage/ | 13 | ✅ EXCELLENT | Disk storage split into focused modules | -| network/ | 14 | ✅ GOOD | Handles peer management, connections, message routing | -| chain/ | 10 | ✅ GOOD | ChainLock, checkpoint, orphan pool management | -| bloom/ | 6 | ✅ GOOD | Bloom filter implementation for transaction filtering | -| validation/ | 6 | ⚠️ FAIR | Needs BLS validation implementation (security) | -| error/ | 1 | ✅ EXCELLENT | Clean error hierarchy with thiserror | -| types/ | 1 | ✅ ACCEPTABLE | Core type definitions, reasonable size | - ---- - -## Security Considerations - -### 🚨 CRITICAL SECURITY ISSUES - -1. **Incomplete ChainLock Validation** - - File: `chain/chainlock_manager.rs:127` - - Issue: Signature validation stubbed out - - Risk: Could accept invalid ChainLocks - - Fix: Implement BLS signature verification - -2. **Incomplete InstantLock Validation** - - File: `validation/instantlock.rs` - - Issue: Validation incomplete - - Risk: Could accept invalid InstantLocks - - Fix: Complete InstantLock validation - -### ⚠️ POTENTIAL RISKS - -3. **No Checksums on Stored Data** - - File: `storage/disk.rs` - - Risk: Silent corruption - - Fix: Add checksums - -4. **No Connection Limits** - - File: `network/manager.rs` - - Risk: DoS via connection exhaustion - - Fix: Add configurable limits - -5. **Peer Ban List Not Persisted** - - File: `network/reputation.rs` - - Risk: Misbehaving peers reconnect after restart - - Fix: Persist ban list - ---- - -## Performance Considerations - -### ✅ OPTIMIZATIONS PRESENT - -1. **CachedHeader** - Excellent X11 hash caching -2. **Segmented Storage** - Good I/O patterns -3. **Bloom Filters** - Efficient transaction filtering -4. **Async/Await** - Non-blocking operations - -### 🔧 POTENTIAL IMPROVEMENTS - -1. **Add Compression** - Filters compress ~70% -2. **Connection Pooling** - Reuse TCP connections -3. **Batch Storage Writes** - Reduce fsync calls -4. 
**RocksDB** - Better than file-based storage - ---- - -## Maintainability Score - -### By Module - -| Module | Maintainability | Reasoning | -|--------|----------------|-----------| -| error | 95/100 ✅ | Perfect design | -| terminal | 90/100 ✅ | Small, focused | -| bloom | 85/100 ✅ | Well-organized | -| chain | 80/100 ✅ | Good structure | -| validation | 70/100 ⚠️ | Incomplete features | -| network | 65/100 ⚠️ | Large files | -| storage | 60/100 ⚠️ | disk.rs too large | -| client | 45/100 🔥 | God object | -| sync | 30/100 🔥🔥🔥 | Massive files | - -### Overall: **55/100** ⚠️ NEEDS IMPROVEMENT - -**Primary Blockers**: -1. File size issues (sync/filters.rs especially) -2. God objects -3. Missing security features - -**After Refactoring Estimate**: **75-80/100** ✅ - ---- - -## Conclusion - -### The Good - -This is a **comprehensive, feature-rich SPV client** with: -- Excellent architectural foundations -- Good use of Rust's type system -- Comprehensive Dash-specific features -- Solid testing culture - -### The Bad - -The codebase suffers from **maintainability crisis**: -- Several files exceed 2,000 lines (one is 4,027!) -- God objects violate Single Responsibility Principle -- Critical security features incomplete - -### The Path Forward - -**Phase 1 (2-3 weeks)**: Critical refactoring -1. Split sync/filters.rs -2. Implement BLS signature validation -3. Split client/mod.rs - -**Phase 2 (2-3 weeks)**: High-priority improvements -4. Split remaining large files -5. Document lock ordering -6. Add integration tests - -**Phase 3 (Ongoing)**: Incremental improvements -7. Resource limits -8. Enhanced error recovery -9. Performance optimizations - -### Final Verdict - -**Rating**: ⚠️ **B- (Good but Needs Work)** - -- **Architecture**: A- (excellent design) -- **Functionality**: A (comprehensive features) -- **Code Quality**: C+ (too many large files) -- **Security**: C (critical features incomplete) -- **Testing**: B- (good but gaps) -- **Documentation**: C+ (incomplete) - -**Recommendation**: This codebase is **production-capable** for its current feature set, but **REQUIRES IMMEDIATE REFACTORING** before adding major new features. The file size issues will cause serious problems for collaboration and maintenance. The incomplete signature validation is a security concern that must be addressed before production use on mainnet. - -**With the recommended refactorings**, this could easily become an **A-grade codebase** - the foundations are solid. - ---- - -*End of Architectural Analysis* diff --git a/dash-spv/CLAUDE.md b/dash-spv/CLAUDE.md index 8c649ee71..dacd9771e 100644 --- a/dash-spv/CLAUDE.md +++ b/dash-spv/CLAUDE.md @@ -18,7 +18,7 @@ The project follows a layered, trait-based architecture with clear separation of - **`sync/sequential/`**: Sequential sync manager that handles all synchronization phases - **`validation/`**: Header validation, ChainLock, and InstantLock verification - **`wallet/`**: UTXO tracking, balance calculation, and transaction processing -- **`types.rs`**: Common data structures (`SyncProgress`, `ValidationMode`, `WatchItem`, etc.) 
+- **`types.rs`**: Common data structures - **`error.rs`**: Unified error handling with domain-specific error types ### Key Design Patterns @@ -103,7 +103,7 @@ TCP-based networking with proper Dash protocol implementation: - **Exclusive mode**: When explicit peers are provided, uses only those peers (no DNS discovery) - Connection management via `Peer` - Handshake handling via `HandshakeManager` -- Message routing via `MessageHandler` +- Message routing via `MessageDispatcher` - Peer support via `PeerNetworkManager` ### Validation Modes diff --git a/dash-spv/Cargo.toml b/dash-spv/Cargo.toml index 71300548a..20b400456 100644 --- a/dash-spv/Cargo.toml +++ b/dash-spv/Cargo.toml @@ -51,9 +51,6 @@ indexmap = "2.0" # Parallelization rayon = "1.11" -# Terminal UI (optional) -crossterm = { version = "0.27", optional = true } - # DNS (trust-dns-resolver was renamed to hickory_resolver) hickory-resolver = "0.25" @@ -78,13 +75,10 @@ harness = false [[bin]] name = "dash-spv" path = "src/main.rs" -required-features = ["terminal-ui"] [lib] name = "dash_spv" path = "src/lib.rs" [features] -# Terminal UI feature (off by default, for use by binary only) -terminal-ui = ["dep:crossterm"] test-utils = [] diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs deleted file mode 100644 index adb0c103f..000000000 --- a/dash-spv/src/chain/chainlock_manager.rs +++ /dev/null @@ -1,457 +0,0 @@ -//! ChainLock manager for DIP8 implementation -//! -//! This module implements ChainLock validation and management according to DIP8, -//! providing protection against 51% attacks and securing InstantSend transactions. - -use dashcore::sml::masternode_list_engine::MasternodeListEngine; -use dashcore::{BlockHash, ChainLock}; -use indexmap::IndexMap; -use std::sync::{Arc, RwLock}; -use tracing::{debug, error, info, warn}; - -use crate::error::{StorageError, StorageResult, ValidationError, ValidationResult}; -use crate::storage::StorageManager; -use crate::types::ChainState; - -/// Maximum number of pending ChainLocks to queue -const MAX_PENDING_CHAINLOCKS: usize = 100; - -/// Number of blocks back from a ChainLock's block height where we need the masternode list -/// for validation. ChainLock signatures are created by the masternode quorum that existed -/// 8 blocks before the ChainLock's block. 
-const CHAINLOCK_VALIDATION_MASTERNODE_OFFSET: u32 = 8;
-
-/// ChainLock storage entry
-#[derive(Debug, Clone)]
-pub struct ChainLockEntry {
-    /// The chain lock message
-    pub chain_lock: ChainLock,
-    /// When this chain lock was received
-    pub received_at: std::time::SystemTime,
-    /// Whether this chain lock has been validated
-    pub validated: bool,
-}
-
-/// Manages ChainLocks according to DIP8
-pub struct ChainLockManager {
-    /// In-memory cache of chain locks by height (maintains insertion order)
-    chain_locks_by_height: Arc<RwLock<IndexMap<u32, ChainLockEntry>>>,
-    /// In-memory cache of chain locks by block hash
-    chain_locks_by_hash: Arc<RwLock<IndexMap<BlockHash, ChainLockEntry>>>,
-    /// Maximum number of chain locks to keep in memory
-    max_cache_size: usize,
-    /// Whether to enforce chain locks (can be disabled for testing)
-    enforce_chain_locks: bool,
-    /// Optional reference to masternode engine for full validation
-    masternode_engine: Arc<RwLock<Option<Arc<MasternodeListEngine>>>>,
-    /// Queue for ChainLocks pending validation (received before masternode sync)
-    pending_chainlocks: Arc<RwLock<Vec<ChainLock>>>,
-}
-
-impl ChainLockManager {
-    /// Create a new ChainLockManager
-    pub fn new(enforce_chain_locks: bool) -> Self {
-        Self {
-            chain_locks_by_height: Arc::new(RwLock::new(IndexMap::new())),
-            chain_locks_by_hash: Arc::new(RwLock::new(IndexMap::new())),
-            max_cache_size: 1000,
-            enforce_chain_locks,
-            masternode_engine: Arc::new(RwLock::new(None)),
-            pending_chainlocks: Arc::new(RwLock::new(Vec::new())),
-        }
-    }
-
-    /// Set the masternode engine for validation
-    pub fn set_masternode_engine(&self, engine: Arc<MasternodeListEngine>) {
-        match self.masternode_engine.write() {
-            Ok(mut guard) => {
-                *guard = Some(engine);
-                info!("Masternode engine set for ChainLock validation");
-            }
-            Err(e) => {
-                error!("Failed to set masternode engine: {}", e);
-            }
-        }
-    }
-
-    /// Queue a ChainLock for validation when masternode data is available
-    pub fn queue_pending_chainlock(&self, chain_lock: ChainLock) -> StorageResult<()> {
-        let mut pending = self
-            .pending_chainlocks
-            .write()
-            .map_err(|_| StorageError::LockPoisoned("pending_chainlocks".to_string()))?;
-
-        // If at capacity, drop the oldest ChainLock
-        if pending.len() >= MAX_PENDING_CHAINLOCKS {
-            let dropped = pending.remove(0);
-            warn!(
-                "Pending ChainLocks queue at capacity ({}), dropping oldest ChainLock at height {}",
-                MAX_PENDING_CHAINLOCKS, dropped.block_height
-            );
-        }
-
-        pending.push(chain_lock);
-        debug!("Queued ChainLock for pending validation, total pending: {}", pending.len());
-        Ok(())
-    }
-
-    /// Validate all pending ChainLocks after masternode sync
-    pub async fn validate_pending_chainlocks<S: StorageManager>(
-        &self,
-        chain_state: &ChainState,
-        storage: &mut S,
-    ) -> ValidationResult<()> {
-        let pending = {
-            let mut pending_guard = self
-                .pending_chainlocks
-                .write()
-                .map_err(|_| ValidationError::InvalidChainLock("Lock poisoned".to_string()))?;
-            std::mem::take(&mut *pending_guard)
-        };
-
-        info!("Validating {} pending ChainLocks", pending.len());
-
-        let mut validated_count = 0;
-        let mut failed_count = 0;
-
-        for chain_lock in pending {
-            match self.process_chain_lock(chain_lock.clone(), chain_state, storage).await {
-                Ok(_) => {
-                    validated_count += 1;
-                    debug!(
-                        "Successfully validated pending ChainLock at height {}",
-                        chain_lock.block_height
-                    );
-                }
-                Err(e) => {
-                    failed_count += 1;
-                    error!(
-                        "Failed to validate pending ChainLock at height {}: {}",
-                        chain_lock.block_height, e
-                    );
-                }
-            }
-        }
-
-        info!(
-            "Pending ChainLock validation complete: {} validated, {} failed",
-            validated_count, failed_count
-        );
-
-        Ok(())
-    }
-
-    /// Process a new chain lock
-    pub async fn process_chain_lock<S: StorageManager>(
-        &self,
-        chain_lock: ChainLock,
-        chain_state: &ChainState,
-        storage: &mut S,
-    ) -> ValidationResult<()> {
-        info!(
-            "Processing ChainLock for height {} hash {}",
-            chain_lock.block_height, chain_lock.block_hash
-        );
-
-        // Check if we already have this chain lock
-        if self.has_chain_lock_at_height(chain_lock.block_height) {
-            let existing = self.get_chain_lock_by_height(chain_lock.block_height);
-            if let Some(existing_entry) = existing {
-                if existing_entry.chain_lock.block_hash != chain_lock.block_hash {
-                    error!(
-                        "Conflicting ChainLock at height {}: existing {} vs new {}",
-                        chain_lock.block_height,
-                        existing_entry.chain_lock.block_hash,
-                        chain_lock.block_hash
-                    );
-                    return Err(ValidationError::InvalidChainLock(format!(
-                        "Conflicting ChainLock at height {}",
-                        chain_lock.block_height
-                    )));
-                }
-                debug!("Already have ChainLock for height {}", chain_lock.block_height);
-                return Ok(());
-            }
-        }
-
-        // Verify the block exists in our chain
-        if let Some(header) = storage
-            .get_header(chain_lock.block_height)
-            .await
-            .map_err(ValidationError::StorageError)?
-        {
-            let header_hash = header.block_hash();
-            if header_hash != chain_lock.block_hash {
-                return Err(ValidationError::InvalidChainLock(format!(
-                    "ChainLock block hash {} does not match our chain at height {} (expected {})",
-                    chain_lock.block_hash, chain_lock.block_height, header_hash
-                )));
-            }
-        } else {
-            // We don't have this block yet, store the chain lock for future validation
-            warn!("Received ChainLock for future block at height {}", chain_lock.block_height);
-        }
-
-        // Full validation with masternode engine if available
-        let mut validated = false;
-        {
-            let engine_guard = self
-                .masternode_engine
-                .read()
-                .map_err(|_| ValidationError::InvalidChainLock("Lock poisoned".to_string()))?;
-
-            if let Some(engine) = engine_guard.as_ref() {
-                // Use the masternode engine's verify_chain_lock method
-                match engine.verify_chain_lock(&chain_lock) {
-                    Ok(()) => {
-                        info!(
-                            "✅ ChainLock validated with masternode engine for height {}",
-                            chain_lock.block_height
-                        );
-                        validated = true;
-                    }
-                    Err(e) => {
-                        // Check if the error is due to missing masternode lists
-                        let error_string = e.to_string();
-                        if error_string.contains("No masternode lists in engine") {
-                            // ChainLock validation requires masternode list at (block_height - CHAINLOCK_VALIDATION_MASTERNODE_OFFSET)
-                            let required_height = chain_lock
-                                .block_height
-                                .saturating_sub(CHAINLOCK_VALIDATION_MASTERNODE_OFFSET);
-                            warn!("⚠️ Masternode engine exists but lacks required masternode lists for height {} (needs list at height {} for ChainLock validation), queueing ChainLock for later validation",
-                                chain_lock.block_height, required_height);
-                            self.queue_pending_chainlock(chain_lock.clone()).map_err(|e| {
-                                ValidationError::InvalidChainLock(format!(
-                                    "Failed to queue pending ChainLock: {}",
-                                    e
-                                ))
-                            })?;
-                        } else {
-                            return Err(ValidationError::InvalidChainLock(format!(
-                                "MasternodeListEngine validation failed: {:?}",
-                                e
-                            )));
-                        }
-                    }
-                }
-            } else {
-                // Queue for later validation when engine becomes available
-                warn!(
-                    "⚠️ Masternode engine not available, queueing ChainLock for later validation"
-                );
-                self.queue_pending_chainlock(chain_lock.clone()).map_err(|e| {
-                    ValidationError::InvalidChainLock(format!(
-                        "Failed to queue pending ChainLock: {}",
-                        e
-                    ))
-                })?;
-            }
-        } // engine_guard dropped before any await
-
-        // Store the chain lock with appropriate validation status
-        self.store_chain_lock_with_validation(chain_lock.clone(), storage, validated).await?;
-
-        // Update chain state
-        self.update_chain_state_with_lock(&chain_lock, chain_state);
-
-        if validated {
-            info!(
-                "Successfully processed and validated ChainLock for height {}",
-                chain_lock.block_height
-            );
-        } else {
-            info!(
-                "Processed ChainLock for height {} (pending full validation)",
-                chain_lock.block_height
-            );
-        }
-
-        Ok(())
-    }
-
-    /// Store a chain lock with validation status
-    async fn store_chain_lock_with_validation<S: StorageManager>(
-        &self,
-        chain_lock: ChainLock,
-        storage: &mut S,
-        validated: bool,
-    ) -> StorageResult<()> {
-        let entry = ChainLockEntry {
-            chain_lock: chain_lock.clone(),
-            received_at: std::time::SystemTime::now(),
-            validated,
-        };
-
-        self.store_chain_lock_internal(chain_lock, entry, storage).await
-    }
-
-    /// Store a chain lock (deprecated, use store_chain_lock_with_validation)
-    #[allow(dead_code)]
-    async fn store_chain_lock<S: StorageManager>(
-        &self,
-        chain_lock: ChainLock,
-        storage: &mut S,
-    ) -> StorageResult<()> {
-        self.store_chain_lock_with_validation(chain_lock, storage, true).await
-    }
-
-    /// Internal method to store a chain lock entry
-    async fn store_chain_lock_internal<S: StorageManager>(
-        &self,
-        chain_lock: ChainLock,
-        entry: ChainLockEntry,
-        storage: &mut S,
-    ) -> StorageResult<()> {
-        // Store in memory caches
-        {
-            let mut by_height = self
-                .chain_locks_by_height
-                .write()
-                .map_err(|_| StorageError::LockPoisoned("chain_locks_by_height".to_string()))?;
-            let mut by_hash = self
-                .chain_locks_by_hash
-                .write()
-                .map_err(|_| StorageError::LockPoisoned("chain_locks_by_hash".to_string()))?;
-
-            by_height.insert(chain_lock.block_height, entry.clone());
-            by_hash.insert(chain_lock.block_hash, entry.clone());
-
-            // Enforce cache size limit
-            if by_height.len() > self.max_cache_size {
-                // Calculate how many entries to remove
-                let entries_to_remove = by_height.len() - self.max_cache_size;
-
-                // Collect keys to remove (oldest entries are at the beginning)
-                let keys_to_remove: Vec<(u32, BlockHash)> = by_height
-                    .iter()
-                    .take(entries_to_remove)
-                    .map(|(height, entry)| (*height, entry.chain_lock.block_hash))
-                    .collect();
-
-                // Batch remove from both maps
-                for (height, block_hash) in keys_to_remove {
-                    by_height.shift_remove(&height);
-                    by_hash.shift_remove(&block_hash);
-                }
-            }
-        }
-
-        // Store persistently
-        let key = format!("chainlock_{}", chain_lock.block_height);
-        let data = bincode::encode_to_vec(&chain_lock, bincode::config::standard())
-            .map_err(|e| StorageError::Serialization(e.to_string()))?;
-        storage.store_metadata(&key, &data).await?;
-
-        Ok(())
-    }
-
-    /// Check if we have a chain lock at the given height
-    pub fn has_chain_lock_at_height(&self, height: u32) -> bool {
-        self.chain_locks_by_height.read().map(|locks| locks.contains_key(&height)).unwrap_or(false)
-    }
-
-    /// Get chain lock by height
-    pub fn get_chain_lock_by_height(&self, height: u32) -> Option<ChainLockEntry> {
-        self.chain_locks_by_height.read().ok().and_then(|locks| locks.get(&height).cloned())
-    }
-
-    /// Get chain lock by block hash
-    pub fn get_chain_lock_by_hash(&self, hash: &BlockHash) -> Option<ChainLockEntry> {
-        self.chain_locks_by_hash.read().ok().and_then(|locks| locks.get(hash).cloned())
-    }
-
-    /// Check if a block is chain-locked
-    pub fn is_block_chain_locked(&self, block_hash: &BlockHash, height: u32) -> bool {
-        // First check by hash (most specific)
-        if let Some(entry) = self.get_chain_lock_by_hash(block_hash) {
-            return entry.validated && entry.chain_lock.block_hash == *block_hash;
-        }
-
-        // Then check by height
-        if let Some(entry) = self.get_chain_lock_by_height(height) {
-            return entry.validated && entry.chain_lock.block_hash == *block_hash;
-        }
-
-        false
-    }
-
-    /// Get the highest chain-locked block height
-    pub fn get_highest_chain_locked_height(&self) -> Option<u32> {
-        self.chain_locks_by_height.read().ok().and_then(|locks| locks.keys().max().cloned())
-    }
-
-    /// Check if a reorganization would violate chain locks
-    pub fn would_violate_chain_lock(&self, reorg_from_height: u32, reorg_to_height: u32) -> bool {
-        if !self.enforce_chain_locks {
-            return false;
-        }
-
-        let locks = match self.chain_locks_by_height.read() {
-            Ok(locks) => locks,
-            Err(_) => return false, // If we can't read locks, assume no violation
-        };
-
-        // Check if any chain-locked block would be reorganized
-        for height in reorg_from_height..=reorg_to_height {
-            if locks.contains_key(&height) {
-                debug!("Reorg would violate chain lock at height {}", height);
-                return true;
-            }
-        }
-
-        false
-    }
-
-    /// Update chain state with a new chain lock
-    fn update_chain_state_with_lock(&self, _chain_lock: &ChainLock, _chain_state: &ChainState) {
-        // This is handled by the caller to avoid mutable borrow issues
-        // The chain state will be updated with the chain lock information
-    }
-
-    /// Load chain locks from storage
-    pub async fn load_from_storage<S: StorageManager>(
-        &self,
-        storage: &S,
-        start_height: u32,
-        end_height: u32,
-    ) -> StorageResult<Vec<ChainLock>> {
-        let mut chain_locks = Vec::new();
-
-        for height in start_height..=end_height {
-            let key = format!("chainlock_{}", height);
-            if let Some(data) = storage.load_metadata(&key).await? {
-                match bincode::decode_from_slice::<ChainLock, _>(&data, bincode::config::standard())
-                {
-                    Ok((chain_lock, _)) => {
-                        // Cache it
-                        let entry = ChainLockEntry {
-                            chain_lock: chain_lock.clone(),
-                            received_at: std::time::SystemTime::now(),
-                            validated: true,
-                        };
-
-                        let mut by_height = self.chain_locks_by_height.write().map_err(|_| {
-                            StorageError::LockPoisoned("chain_locks_by_height".to_string())
-                        })?;
-                        let mut by_hash = self.chain_locks_by_hash.write().map_err(|_| {
-                            StorageError::LockPoisoned("chain_locks_by_hash".to_string())
-                        })?;
-
-                        by_height.insert(chain_lock.block_height, entry.clone());
-                        by_hash.insert(chain_lock.block_hash, entry);
-
-                        chain_locks.push(chain_lock);
-                    }
-                    Err(e) => {
-                        error!("Failed to deserialize chain lock at height {}: {}", height, e);
-                    }
-                }
-            }
-        }
-        Ok(chain_locks)
-    }
-}
-
-#[cfg(test)]
-#[path = "chainlock_test.rs"]
-mod chainlock_test;
diff --git a/dash-spv/src/chain/chainlock_test.rs b/dash-spv/src/chain/chainlock_test.rs
deleted file mode 100644
index 96f3b2cae..000000000
--- a/dash-spv/src/chain/chainlock_test.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-#[cfg(test)]
-mod tests {
-    use super::super::*;
-    use crate::{
-        storage::{BlockHeaderStorage, DiskStorageManager},
-        types::ChainState,
-    };
-    use dashcore::{Header, Network};
-
-    #[tokio::test]
-    async fn test_chainlock_processing() {
-        // Create storage and ChainLock manager
-        let mut storage =
-            DiskStorageManager::with_temp_dir().await.expect("Failed to create tmp storage");
-        let chainlock_manager = ChainLockManager::new(true);
-        let chain_state = ChainState::new_for_network(Network::Testnet);
-
-        let chainlock = ChainLock::dummy(1000);
-
-        // Process the ChainLock
-        let result = chainlock_manager
-            .process_chain_lock(chainlock.clone(), &chain_state, &mut storage)
-            .await;
-
-        // Should succeed even without full validation
-        assert!(result.is_ok(), "ChainLock processing should succeed");
-
-        // Verify it was stored
assert!(chainlock_manager.has_chain_lock_at_height(1000)); - - // Verify we can retrieve it - let entry = chainlock_manager - .get_chain_lock_by_height(1000) - .expect("ChainLock should be retrievable after storing"); - assert_eq!(entry.chain_lock.block_height, 1000); - assert_eq!(entry.chain_lock.block_hash, chainlock.block_hash); - } - - #[tokio::test] - async fn test_chainlock_superseding() { - let mut storage = - DiskStorageManager::with_temp_dir().await.expect("Failed to create tmp storage"); - let chainlock_manager = ChainLockManager::new(true); - let chain_state = ChainState::new_for_network(Network::Testnet); - - let chainlock1 = ChainLock::dummy(1000); - - chainlock_manager - .process_chain_lock(chainlock1.clone(), &chain_state, &mut storage) - .await - .expect("First ChainLock should process successfully"); - - let chainlock2 = ChainLock::dummy(2000); - - chainlock_manager - .process_chain_lock(chainlock2.clone(), &chain_state, &mut storage) - .await - .expect("Second ChainLock should process successfully"); - - // Verify both are stored - assert!(chainlock_manager.has_chain_lock_at_height(1000)); - assert!(chainlock_manager.has_chain_lock_at_height(2000)); - - // Get highest ChainLock - let highest = chainlock_manager.get_highest_chain_locked_height(); - assert_eq!(highest, Some(2000)); - } - - #[tokio::test] - async fn test_reorganization_protection() { - let chainlock_manager = ChainLockManager::new(true); - let chain_state = ChainState::new_for_network(Network::Testnet); - let mut storage = - DiskStorageManager::with_temp_dir().await.expect("Failed to create tmp storage"); - - // Add ChainLocks at heights 1000, 2000, 3000 - for height in [1000, 2000, 3000] { - let chainlock = ChainLock::dummy(height); - chainlock_manager - .process_chain_lock(chainlock, &chain_state, &mut storage) - .await - .unwrap_or_else(|_| { - panic!("ChainLock at height {} should process successfully", height) - }); - } - - // Test reorganization protection - assert!(!chainlock_manager.would_violate_chain_lock(500, 999)); // Before ChainLocks - OK - assert!(chainlock_manager.would_violate_chain_lock(1500, 2500)); // Would reorg ChainLock at 2000 - assert!(!chainlock_manager.would_violate_chain_lock(3001, 4000)); // After ChainLocks - OK - } - - #[tokio::test] - async fn test_chainlock_queue_and_process_flow() { - let chainlock_manager = ChainLockManager::new(true); - - // Queue multiple ChainLocks - let chain_lock1 = ChainLock::dummy(100); - let chain_lock2 = ChainLock::dummy(200); - let chain_lock3 = ChainLock::dummy(300); - - chainlock_manager.queue_pending_chainlock(chain_lock1).unwrap(); - chainlock_manager.queue_pending_chainlock(chain_lock2).unwrap(); - chainlock_manager.queue_pending_chainlock(chain_lock3).unwrap(); - - // Verify all are queued - { - // Note: pending_chainlocks is private, can't access directly - let pending = chainlock_manager.pending_chainlocks.read().unwrap(); - assert_eq!(pending.len(), 3); - assert_eq!(pending[0].block_height, 100); - assert_eq!(pending[1].block_height, 200); - assert_eq!(pending[2].block_height, 300); - } - } - - #[tokio::test] - async fn test_chainlock_manager_cache_operations() { - let mut storage = DiskStorageManager::with_temp_dir().await.unwrap(); - - let chainlock_manager = ChainLockManager::new(true); - - // Add test headers - let header = Header::dummy(0); - storage.store_headers_at_height(&[header], 0).await.unwrap(); - - // Create and process a ChainLock - let chain_lock = ChainLock::dummy(0); - let chain_state = ChainState::new(); - let _ = 
chainlock_manager - .process_chain_lock(chain_lock.clone(), &chain_state, &mut storage) - .await; - - // Test cache operations - assert!(chainlock_manager.has_chain_lock_at_height(0)); - - let entry = chainlock_manager.get_chain_lock_by_height(0); - assert!(entry.is_some()); - assert_eq!(entry.unwrap().chain_lock.block_height, 0); - - let entry_by_hash = chainlock_manager.get_chain_lock_by_hash(&header.block_hash()); - assert!(entry_by_hash.is_some()); - assert_eq!(entry_by_hash.unwrap().chain_lock.block_height, 0); - } -} diff --git a/dash-spv/src/chain/mod.rs b/dash-spv/src/chain/mod.rs index 1c5e2630f..3231c70ea 100644 --- a/dash-spv/src/chain/mod.rs +++ b/dash-spv/src/chain/mod.rs @@ -8,7 +8,6 @@ pub mod chain_tip; pub mod chain_work; -pub mod chainlock_manager; pub mod checkpoints; #[cfg(test)] @@ -16,5 +15,4 @@ mod checkpoint_test; pub use chain_tip::{ChainTip, ChainTipManager}; pub use chain_work::ChainWork; -pub use chainlock_manager::{ChainLockEntry, ChainLockManager}; pub use checkpoints::{Checkpoint, CheckpointManager}; diff --git a/dash-spv/src/client/chainlock.rs b/dash-spv/src/client/chainlock.rs deleted file mode 100644 index 88a8fb8c8..000000000 --- a/dash-spv/src/client/chainlock.rs +++ /dev/null @@ -1,104 +0,0 @@ -//! ChainLock processing and validation. -//! -//! This module contains: -//! - ChainLock processing -//! - InstantSendLock processing -//! - ChainLock validation updates -//! - Pending ChainLock validation - -use crate::error::{Result, SpvError}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use crate::types::SpvEvent; -use key_wallet_manager::wallet_interface::WalletInterface; -use std::net::SocketAddr; - -use super::DashSpvClient; - -impl DashSpvClient { - /// Process and validate a ChainLock. 
- pub async fn process_chainlock( - &mut self, - peer_address: SocketAddr, - chainlock: dashcore::ephemerealdata::chain_lock::ChainLock, - ) -> Result<()> { - tracing::info!( - "Processing ChainLock for block {} at height {}", - chainlock.block_hash, - chainlock.block_height - ); - - // First perform basic validation and storage through ChainLockManager - let chain_state = self.state.read().await; - { - let mut storage = self.storage.lock().await; - if let Err(e) = self - .chainlock_manager - .process_chain_lock(chainlock.clone(), &chain_state, &mut *storage) - .await - { - // Penalize the peer that relayed the invalid ChainLock - let reason = format!("Invalid ChainLock: {}", e); - self.network.penalize_peer_invalid_chainlock(peer_address, &reason).await; - return Err(SpvError::Validation(e)); - } - } - drop(chain_state); - - // Sequential sync handles masternode validation internally - tracing::info!( - "ChainLock stored, sequential sync will handle masternode validation internally" - ); - - // Update chain state with the new ChainLock - let mut state = self.state.write().await; - if let Some(current_chainlock_height) = state.last_chainlock_height { - if chainlock.block_height <= current_chainlock_height { - tracing::debug!( - "ChainLock for height {} does not supersede current ChainLock at height {}", - chainlock.block_height, - current_chainlock_height - ); - return Ok(()); - } - } - - // Update our confirmed chain tip - state.last_chainlock_height = Some(chainlock.block_height); - state.last_chainlock_hash = Some(chainlock.block_hash); - - tracing::info!( - "🔒 Updated confirmed chain tip to ChainLock at height {} ({})", - chainlock.block_height, - chainlock.block_hash - ); - - // Emit ChainLock event - self.emit_event(SpvEvent::ChainLockReceived { - chain_lock: chainlock, - validated: true, - }); - - // No need for additional storage - ChainLockManager already handles it - Ok(()) - } - - /// Validate all pending ChainLocks after masternode engine is available. - /// This requires mutable access to self for storage access. - pub async fn validate_pending_chainlocks(&mut self) -> Result<()> { - let chain_state = self.state.read().await; - - let mut storage = self.storage.lock().await; - match self.chainlock_manager.validate_pending_chainlocks(&chain_state, &mut *storage).await - { - Ok(_) => { - tracing::info!("Successfully validated pending ChainLocks"); - Ok(()) - } - Err(e) => { - tracing::error!("Failed to validate pending ChainLocks: {}", e); - Err(SpvError::Validation(e)) - } - } - } -} diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index 0550e6bd8..5a0c5a7b0 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -8,14 +8,11 @@ //! - Configuration updates //! 
- Terminal UI accessors
 
-#[cfg(feature = "terminal-ui")]
-use crate::terminal::TerminalUI;
 use dashcore::sml::masternode_list_engine::MasternodeListEngine;
 use std::sync::Arc;
-use tokio::sync::{mpsc, Mutex, RwLock};
+use tokio::sync::{Mutex, RwLock};
 
-use super::{ClientConfig, StatusDisplay};
-use crate::chain::ChainLockManager;
+use super::ClientConfig;
 use crate::error::{Result, SpvError};
 use crate::mempool_filter::MempoolFilter;
 use crate::network::NetworkManager;
@@ -23,9 +20,8 @@ use crate::storage::{
     PersistentBlockHeaderStorage, PersistentBlockStorage, PersistentFilterHeaderStorage,
     PersistentFilterStorage, StorageManager,
 };
-use crate::sync::legacy::filters::FilterNotificationSender;
 use crate::sync::SyncCoordinator;
-use crate::types::{ChainState, MempoolState, SpvEvent};
+use crate::types::MempoolState;
 use key_wallet_manager::wallet_interface::WalletInterface;
 
 /// Main Dash SPV client with generic trait-based architecture.
@@ -101,7 +97,6 @@ use key_wallet_manager::wallet_interface::WalletInterface;
 /// The generic design is an intentional, beneficial architectural choice for a library.
 pub struct DashSpvClient<S: StorageManager, N: NetworkManager, W: WalletInterface> {
     pub(super) config: ClientConfig,
-    pub(super) state: Arc<RwLock<ChainState>>,
     pub(super) network: N,
     pub(super) storage: Arc<Mutex<S>>,
     /// External wallet implementation (required)
@@ -114,13 +109,7 @@ pub struct DashSpvClient<S: StorageManager, N: NetworkManager, W: WalletInterface>
     pub(super) sync_coordinator: SyncCoordinator<S, N, W>,
-    pub(super) chainlock_manager: Arc<ChainLockManager>,
     pub(super) running: Arc<RwLock<bool>>,
-    #[cfg(feature = "terminal-ui")]
-    pub(super) terminal_ui: Option<Arc<TerminalUI>>,
-    pub(super) filter_processor: Option<FilterNotificationSender>,
-    pub(super) event_tx: mpsc::UnboundedSender<SpvEvent>,
-    pub(super) event_rx: Option<mpsc::UnboundedReceiver<SpvEvent>>,
     pub(super) mempool_state: Arc<RwLock<MempoolState>>,
     pub(super) mempool_filter: Option<Arc<MempoolFilter>>,
 }
@@ -143,11 +132,6 @@ impl<S: StorageManager, N: NetworkManager, W: WalletInterface> DashSpvClient<S, N, W>
-    /// Get the ChainLock manager.
-    pub fn chainlock_manager(&self) -> &Arc<ChainLockManager> {
-        &self.chainlock_manager
-    }
-
     // ============ State Queries ============
 
     /// Check if the client is running.
@@ -166,12 +150,6 @@ impl<S: StorageManager, N: NetworkManager, W: WalletInterface> DashSpvClient<S, N, W>
-    /// Get the current chain state.
-    pub async fn chain_state(&self) -> ChainState {
-        let display = self.create_status_display().await;
-        display.chain_state().await
-    }
-
     // ============ Storage Operations ============
 
     /// Clear all persisted storage (headers, filters, state, sync state) and reset in-memory state.
@@ -182,12 +160,6 @@ impl<S: StorageManager, N: NetworkManager, W: WalletInterface> DashSpvClient<S, N, W>
-    /// Get the terminal UI handle.
-    #[cfg(feature = "terminal-ui")]
-    pub fn get_terminal_ui(&self) -> Option<Arc<TerminalUI>> {
-        self.terminal_ui.clone()
-    }
-
-    // ============ Internal Helpers ============
-
-    /// Helper to create a StatusDisplay instance.
-    #[cfg(feature = "terminal-ui")]
-    pub(super) async fn create_status_display(&self) -> StatusDisplay<'_, S, W> {
-        StatusDisplay::new(
-            &self.state,
-            self.storage.clone(),
-            Some(&self.wallet),
-            &self.terminal_ui,
-            &self.config,
-        )
-    }
-
-    /// Helper to create a StatusDisplay instance (without terminal UI).
-    #[cfg(not(feature = "terminal-ui"))]
-    pub(super) async fn create_status_display(&self) -> StatusDisplay<'_, S, W> {
-        StatusDisplay::new(
-            &self.state,
-            self.storage.clone(),
-            Some(&self.wallet),
-            &None,
-            &self.config,
-        )
-    }
 }
diff --git a/dash-spv/src/client/events.rs b/dash-spv/src/client/events.rs
index b1b4c3891..2c929fe47 100644
--- a/dash-spv/src/client/events.rs
+++ b/dash-spv/src/client/events.rs
@@ -4,29 +4,17 @@
 //! - Event receiver management
 //! - Event emission
 
-use tokio::sync::{mpsc, watch};
+use tokio::sync::watch;
 
 use crate::network::{NetworkEvent, NetworkManager};
 use crate::storage::StorageManager;
 use crate::sync::{SyncEvent, SyncProgress};
-use crate::types::SpvEvent;
 use key_wallet_manager::wallet_interface::WalletInterface;
 use tokio::sync::broadcast;
 
 use super::DashSpvClient;
 
 impl<S: StorageManager, N: NetworkManager, W: WalletInterface> DashSpvClient<S, N, W> {
-    /// Take the event receiver for external consumption.
- pub fn take_event_receiver(&mut self) -> Option> { - self.event_rx.take() - } - - /// Emit an event. - pub(crate) fn emit_event(&self, event: SpvEvent) { - tracing::debug!("Emitting event: {:?}", event); - let _ = self.event_tx.send(event); - } - /// Subscribe to sync progress updates via watch channel. pub fn subscribe_progress(&self) -> watch::Receiver { self.sync_coordinator.subscribe_progress() diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index ef77b50ce..b4b01465e 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -10,11 +10,10 @@ use std::collections::HashSet; use std::sync::Arc; -use tokio::sync::{mpsc, Mutex, RwLock}; +use tokio::sync::{Mutex, RwLock}; use super::{ClientConfig, DashSpvClient}; use crate::chain::checkpoints::{mainnet_checkpoints, testnet_checkpoints, CheckpointManager}; -use crate::chain::ChainLockManager as LegacyChainLockManager; use crate::error::{Result, SpvError}; use crate::mempool_filter::MempoolFilter; use crate::network::NetworkManager; @@ -26,7 +25,7 @@ use crate::sync::{ BlockHeadersManager, BlocksManager, ChainLockManager, FilterHeadersManager, FiltersManager, InstantSendManager, Managers, MasternodesManager, SyncCoordinator, }; -use crate::types::{ChainState, MempoolState}; +use crate::types::MempoolState; use dashcore::network::constants::NetworkExt; use dashcore::sml::masternode_list_engine::MasternodeListEngine; use dashcore_hashes::Hash; @@ -43,9 +42,6 @@ impl DashSpvClient DashSpvClient DashSpvClient DashSpvClient DashSpvClient DashSpvClient { - sync_manager: &'a mut SyncManager, - storage: &'a mut S, - network: &'a mut N, - config: &'a ClientConfig, - mempool_filter: &'a Option>, - mempool_state: &'a Arc>, - event_tx: &'a tokio::sync::mpsc::UnboundedSender, -} - -impl<'a, S: StorageManager, N: NetworkManager, W: WalletInterface> MessageHandler<'a, S, N, W> { - /// Create a new message handler. - #[allow(clippy::too_many_arguments)] - pub fn new( - sync_manager: &'a mut SyncManager, - storage: &'a mut S, - network: &'a mut N, - config: &'a ClientConfig, - mempool_filter: &'a Option>, - mempool_state: &'a Arc>, - event_tx: &'a tokio::sync::mpsc::UnboundedSender, - ) -> Self { - Self { - sync_manager, - storage, - network, - config, - mempool_filter, - mempool_state, - event_tx, - } - } - - /// Handle incoming network messages during monitoring. 
- pub async fn handle_network_message(&mut self, message: &Message) -> Result<()> { - use dashcore::network::message::NetworkMessage; - - tracing::debug!( - "Client handling network message: {:?}", - std::mem::discriminant(message.inner()) - ); - - // First check if this is a message that ONLY the sync manager handles - // These messages can be moved to the sync manager without cloning - match message.inner() { - NetworkMessage::Headers2(ref headers2) => { - tracing::info!( - "📋 Received Headers2 message with {} compressed headers", - headers2.headers.len() - ); - - // Move to sync manager without cloning - return self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - .map_err(|e| { - tracing::error!("Sequential sync manager error handling message: {}", e); - SpvError::Sync(e) - }); - } - NetworkMessage::MnListDiff(ref diff) => { - tracing::info!("📨 Received MnListDiff message: {} new masternodes, {} deleted masternodes, {} quorums", - diff.new_masternodes.len(), diff.deleted_masternodes.len(), diff.new_quorums.len()); - // Move to sync manager without cloning - return self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - .map_err(|e| { - tracing::error!("Sequential sync manager error handling message: {}", e); - SpvError::Sync(e) - }); - } - NetworkMessage::CFHeaders(ref cf_headers) => { - // Try to include the peer address for better diagnostics - tracing::info!( - "📨 Client received CFHeaders message with {} filter headers from {}", - cf_headers.filter_hashes.len(), - message.peer_address() - ); - // Move to sync manager without cloning - return self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - .map_err(|e| { - tracing::error!("Sequential sync manager error handling message: {}", e); - SpvError::Sync(e) - }); - } - NetworkMessage::QRInfo(ref qr_info) => { - tracing::info!( - "📨 Received QRInfo message with {} diffs and {} snapshots", - qr_info.mn_list_diff_list.len(), - qr_info.quorum_snapshot_list.len() - ); - // Move to sync manager without cloning - return self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - .map_err(|e| { - tracing::error!("Sequential sync manager error handling QRInfo: {}", e); - SpvError::Sync(e) - }); - } - NetworkMessage::Headers(_) | NetworkMessage::CFilter(_) => { - // Headers and CFilters are relatively small, cloning is acceptable - if let Err(e) = self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - { - tracing::error!("Sequential sync manager error handling message: {}", e); - } - } - NetworkMessage::Block(_) => { - if self.sync_manager.is_in_downloading_blocks_phase() { - if let Err(e) = self - .sync_manager - .handle_message(message, &mut *self.network, &mut *self.storage) - .await - { - tracing::error!( - "Sequential sync manager error handling block message: {}", - e - ); - } - } else { - // Sync manager will just log and return, no need to send it - tracing::debug!("Block received outside of DownloadingBlocks phase - skipping sync manager processing"); - } - } - _ => { - // Other messages don't need sync manager processing in this context - } - } - - // Then handle client-specific message processing - match message.inner() { - NetworkMessage::Headers(headers) => { - // For post-sync headers, we need special handling - if self.sync_manager.is_synced() && !headers.is_empty() { - tracing::info!( - "📋 Post-sync headers received 
from {} ({} headers), additional processing may be needed", - message.peer_address(), - headers.len() - ); - } - } - NetworkMessage::Block(block) => { - let block_hash = block.header.block_hash(); - tracing::info!("Received new block: {}", block_hash); - tracing::debug!( - "📋 Block {} contains {} transactions", - block_hash, - block.txdata.len() - ); - - // 1) Ensure header processing and chain tip update for this block - // Route the header through the sequential sync manager as a Headers message - let headers_msg = Message::new( - message.peer_address(), - NetworkMessage::Headers(vec![block.header]), - ); - if let Err(e) = self - .sync_manager - .handle_message(&headers_msg, &mut *self.network, &mut *self.storage) - .await - { - tracing::error!( - "❌ Failed to process header for block {} via sync manager: {}", - block_hash, - e - ); - return Err(SpvError::Sync(e)); - } - } - NetworkMessage::Inv(inv) => { - tracing::debug!("Received inventory message with {} items", inv.len()); - // Handle inventory messages (new blocks, transactions, etc.) - self.handle_inventory(inv.clone()).await?; - } - NetworkMessage::Tx(tx) => { - tracing::info!("📨 Received transaction: {}", tx.txid()); - - // Only process if mempool tracking is enabled - if let Some(filter) = self.mempool_filter { - // Check if we should process this transaction - if let Some(unconfirmed_tx) = filter.process_transaction(tx.clone()).await { - let txid = unconfirmed_tx.txid(); - let amount = unconfirmed_tx.net_amount; - let is_instant_send = unconfirmed_tx.is_instant_send; - let addresses: Vec = - unconfirmed_tx.addresses.iter().map(|a| a.to_string()).collect(); - - // Store in mempool - let mut state = self.mempool_state.write().await; - state.add_transaction(unconfirmed_tx.clone()); - drop(state); - - // Store in storage if persistence is enabled - if self.config.persist_mempool { - if let Err(e) = - self.storage.store_mempool_transaction(&txid, &unconfirmed_tx).await - { - tracing::error!("Failed to persist mempool transaction: {}", e); - } - } - - // Emit event - let event = SpvEvent::MempoolTransactionAdded { - txid, - transaction: Box::new(tx.clone()), - amount, - addresses, - is_instant_send, - }; - let _ = self.event_tx.send(event); - - tracing::info!( - "💸 Added mempool transaction {} (amount: {})", - txid, - amount - ); - } else { - tracing::debug!( - "Transaction {} not relevant or at capacity, ignoring", - tx.txid() - ); - } - } else { - tracing::warn!("⚠️ Received transaction {} but mempool tracking is disabled (enable_mempool_tracking=false)", tx.txid()); - } - } - NetworkMessage::CLSig(chain_lock) => { - tracing::info!("Received ChainLock for block {}", chain_lock.block_hash); - // ChainLock processing would need access to state and validation - // This might need to be handled at the client level - tracing::debug!("ChainLock processing not yet implemented in message handler"); - } - NetworkMessage::ISLock(instant_lock) => { - tracing::info!("Received InstantSendLock for tx {}", instant_lock.txid); - // InstantLock processing would need access to validation - // This might need to be handled at the client level - tracing::debug!("InstantLock processing not yet implemented in message handler"); - } - NetworkMessage::Ping(nonce) => { - tracing::debug!("Received ping with nonce {}", nonce); - } - NetworkMessage::Pong(nonce) => { - tracing::debug!("Received pong with nonce {}", nonce); - } - NetworkMessage::CFilter(cfilter) => { - tracing::debug!("Received CFilter for block {}", cfilter.block_hash); - } - 
NetworkMessage::SendDsq(wants_dsq) => { - tracing::info!("Received SendDsq message - peer wants DSQ messages: {}", wants_dsq); - - // Send our own SendDsq(false) in response - we're an SPV client and don't want DSQ messages - tracing::info!("Sending SendDsq(false) to indicate we don't want DSQ messages"); - if let Err(e) = self.network.send_message(NetworkMessage::SendDsq(false)).await { - tracing::error!("Failed to send SendDsq response: {}", e); - } - } - _ => { - // Ignore other message types for now - tracing::debug!( - "Received network message: {:?}", - std::mem::discriminant(message.inner()) - ); - } - } - - Ok(()) - } - - /// Handle inventory messages - auto-request ChainLocks and other important data. - async fn handle_inventory( - &mut self, - inv: Vec, - ) -> Result<()> { - use dashcore::network::message::NetworkMessage; - use dashcore::network::message_blockdata::Inventory; - - let mut chainlocks_to_request = Vec::new(); - let mut blocks_to_request = Vec::new(); - let mut islocks_to_request = Vec::new(); - - for item in inv { - match item { - Inventory::Block(block_hash) => { - tracing::info!("🆕 Inventory: New block announcement {}", block_hash); - blocks_to_request.push(item); - } - Inventory::ChainLock(chainlock_hash) => { - tracing::info!("🔒 Inventory: New ChainLock {}", chainlock_hash); - chainlocks_to_request.push(item); - } - Inventory::InstantSendLock(islock_hash) => { - // Only fetch InstantSendLocks when we're fully synced and have masternode data - if self.sync_manager.is_synced() - && self.sync_manager.get_masternode_engine().is_some() - { - tracing::info!("⚡ Inventory: New InstantSendLock {}", islock_hash); - islocks_to_request.push(item); - } else { - tracing::debug!( - "Skipping InstantSendLock {} fetch - not fully synced or masternode engine unavailable", - islock_hash - ); - } - } - Inventory::Transaction(txid) => { - tracing::debug!("💸 Inventory: New transaction {}", txid); - - // Check if we should fetch this transaction - if let Some(filter) = self.mempool_filter { - if self.config.fetch_mempool_transactions - && filter.should_fetch_transaction(&txid).await - { - tracing::info!("📥 Requesting transaction {}", txid); - // Request the transaction - let getdata = NetworkMessage::GetData(vec![item]); - if let Err(e) = self.network.send_message(getdata).await { - tracing::error!("Failed to request transaction {}: {}", txid, e); - } - } else { - tracing::debug!("Not fetching transaction {} (fetch_mempool_transactions={}, should_fetch={})", - txid, - self.config.fetch_mempool_transactions, - filter.should_fetch_transaction(&txid).await - ); - } - } else { - tracing::warn!("⚠️ Transaction {} announced but mempool tracking is disabled (enable_mempool_tracking=false)", txid); - } - } - _ => { - tracing::debug!("❓ Inventory: Other item type"); - } - } - } - - // Auto-request ChainLocks (highest priority for validation) - if !chainlocks_to_request.is_empty() { - tracing::info!("Requesting {} ChainLocks", chainlocks_to_request.len()); - let getdata = NetworkMessage::GetData(chainlocks_to_request); - self.network.send_message(getdata).await.map_err(SpvError::Network)?; - } - - // Auto-request InstantLocks (only when synced and masternodes available; gated above) - if !islocks_to_request.is_empty() { - tracing::info!("Requesting {} InstantLocks", islocks_to_request.len()); - let getdata = NetworkMessage::GetData(islocks_to_request); - self.network.send_message(getdata).await.map_err(SpvError::Network)?; - } - - // For blocks announced via inventory during tip sync, request 
full blocks for privacy - if !blocks_to_request.is_empty() { - tracing::info!( - "📥 Requesting {} new blocks announced via inventory", - blocks_to_request.len() - ); - - let getdata = NetworkMessage::GetData(blocks_to_request); - if let Err(e) = self.network.send_message(getdata).await { - tracing::error!("Failed to request announced blocks: {}", e); - } - } - - Ok(()) - } - - /// Handle new headers received after the initial sync is complete. - /// The sequential sync manager will handle requesting filter headers internally. - pub async fn handle_post_sync_headers( - &mut self, - headers: &[dashcore::block::Header], - ) -> Result<()> { - if !self.config.enable_filters { - tracing::debug!( - "Filters not enabled, skipping post-sync filter requests for {} headers", - headers.len() - ); - return Ok(()); - } - - tracing::info!( - "Handling {} post-sync headers - sequential sync will manage filter requests", - headers.len() - ); - - // The sequential sync manager's handle_new_headers method will automatically - // request filter headers and filters as needed - self.sync_manager - .handle_new_headers(headers, &mut *self.network, &mut *self.storage) - .await - .map_err(SpvError::Sync)?; - - Ok(()) - } -} diff --git a/dash-spv/src/client/message_handler_test.rs b/dash-spv/src/client/message_handler_test.rs deleted file mode 100644 index cff1b59aa..000000000 --- a/dash-spv/src/client/message_handler_test.rs +++ /dev/null @@ -1,361 +0,0 @@ -//! Unit tests for network message handling - -#[cfg(test)] -mod tests { - use crate::client::{ClientConfig, MessageHandler}; - use crate::network::Message; - use crate::storage::DiskStorageManager; - use crate::sync::legacy::SyncManager; - use crate::test_utils::{test_socket_address, MockNetworkManager}; - use crate::types::{MempoolState, SpvEvent}; - use crate::ChainState; - use dashcore::block::Header as BlockHeader; - use dashcore::network::message::NetworkMessage; - use dashcore::network::message_blockdata::Inventory; - use dashcore::{Block, BlockHash, Network, Transaction}; - use dashcore_hashes::Hash; - use key_wallet_manager::WalletManager; - use std::collections::HashSet; - use std::sync::Arc; - use tokio::sync::{mpsc, Mutex, RwLock}; - - async fn setup_test_components() -> ( - MockNetworkManager, - DiskStorageManager, - SyncManager, - ClientConfig, - Arc>, - mpsc::UnboundedSender, - ) { - let network = MockNetworkManager::new(); - let storage = - DiskStorageManager::with_temp_dir().await.expect("Failed to create tmp storage"); - let config = ClientConfig::default(); - let mempool_state = Arc::new(RwLock::new(MempoolState::default())); - let (event_tx, _event_rx) = mpsc::unbounded_channel(); - - let wallet = WalletManager::new(Network::Testnet); - - // Create sync manager - let received_filter_heights = Arc::new(Mutex::new(HashSet::new())); - let sync_manager = SyncManager::new( - &config, - received_filter_heights, - Arc::new(RwLock::new(wallet)), - Arc::new(RwLock::new(ChainState::new())), - ) - .unwrap(); - - (network, storage, sync_manager, config, mempool_state, event_tx) - } - - #[tokio::test] - async fn test_handle_headers2_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a Headers2 message - let headers2 = dashcore::network::message_headers2::Headers2Message { - headers: vec![], - }; - let message = 
Message::new(test_socket_address(1), NetworkMessage::Headers2(headers2)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - - // Verify peer was marked as having sent headers2 - // (MockNetworkManager would track this) - } - - #[tokio::test] - async fn test_handle_mnlistdiff_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a MnListDiff message - let mnlistdiff = dashcore::network::message_sml::MnListDiff { - version: 1, - base_block_hash: BlockHash::from([0u8; 32]), - block_hash: BlockHash::from([0u8; 32]), - total_transactions: 0, - merkle_hashes: vec![], - merkle_flags: vec![], - coinbase_tx: dashcore::Transaction { - version: 1, - lock_time: 0, - input: vec![], - output: vec![], - special_transaction_payload: None, - }, - deleted_masternodes: vec![], - new_masternodes: vec![], - deleted_quorums: vec![], - new_quorums: vec![], - quorums_chainlock_signatures: vec![], - }; - let message = Message::new(test_socket_address(1), NetworkMessage::MnListDiff(mnlistdiff)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_cfheaders_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a CFHeaders message - let cfheaders = dashcore::network::message_filter::CFHeaders { - filter_type: 0, - stop_hash: BlockHash::from([0u8; 32]), - previous_filter_header: dashcore::hash_types::FilterHeader::from([0u8; 32]), - filter_hashes: vec![], - }; - let message = Message::new(test_socket_address(1), NetworkMessage::CFHeaders(cfheaders)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_cfilter_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a CFilter message - let cfilter = dashcore::network::message_filter::CFilter { - filter_type: 0, - block_hash: BlockHash::from([0u8; 32]), - filter: vec![], - }; - let message = Message::new(test_socket_address(1), NetworkMessage::CFilter(cfilter)); - - // Handle the message - should be passed to sync manager - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_block_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a Block message - let block = Block { - header: BlockHeader { - version: dashcore::block::Version::from_consensus(1), - prev_blockhash: BlockHash::from([0u8; 32]), - merkle_root: dashcore::hash_types::TxMerkleNode::from([0u8; 
32]), - time: 0, - bits: dashcore::CompactTarget::from_consensus(0), - nonce: 0, - }, - txdata: vec![], - }; - let message = Message::new(test_socket_address(1), NetworkMessage::Block(block.clone())); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_inv_message_with_mempool() { - let (mut network, mut storage, mut sync_manager, mut config, mempool_state, event_tx) = - setup_test_components().await; - - // Enable mempool tracking - config.enable_mempool_tracking = true; - config.fetch_mempool_transactions = true; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create an Inv message with transaction - let inv = vec![Inventory::Transaction(dashcore::Txid::all_zeros())]; - let message = Message::new(test_socket_address(1), NetworkMessage::Inv(inv)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - - // Should have requested the transaction - // (MockNetworkManager would track this) - } - - #[tokio::test] - async fn test_handle_tx_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a Tx message - let tx = Transaction { - version: 1, - lock_time: 0, - input: vec![], - output: vec![], - special_transaction_payload: None, - }; - let message = Message::new(test_socket_address(1), NetworkMessage::Tx(tx.clone())); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - - // Should have emitted transaction event - // Note: The test setup has event_tx (sender), not event_rx (receiver) - // In a real test, we'd need to create a receiver to check events - // For now, just verify the handler processed without error - } - - #[tokio::test] - async fn test_handle_chainlock_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a ChainLock message - let chainlock = dashcore::ChainLock { - block_height: 100, - block_hash: BlockHash::from([0u8; 32]), - signature: dashcore::bls_sig_utils::BLSSignature::from([0u8; 96]), - }; - let message = Message::new(test_socket_address(1), NetworkMessage::CLSig(chainlock)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_ping_message() { - let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) = - setup_test_components().await; - - let mut handler = MessageHandler::new( - &mut sync_manager, - &mut storage, - &mut network, - &config, - &None, - &mempool_state, - &event_tx, - ); - - // Create a Ping message - let message = Message::new(test_socket_address(1), NetworkMessage::Ping(12345)); - - // Handle the message - let result = handler.handle_network_message(&message).await; - assert!(result.is_ok()); - - // Should respond with pong (MockNetworkManager would track this) - } - - #[tokio::test] - async fn 
test_error_propagation() {
-        let (mut network, mut storage, mut sync_manager, config, mempool_state, event_tx) =
-            setup_test_components().await;
-
-        let mut handler = MessageHandler::new(
-            &mut sync_manager,
-            &mut storage,
-            &mut network,
-            &config,
-            &None,
-            &mempool_state,
-            &event_tx,
-        );
-
-        // Create a message that might cause an error in sync manager
-        // For example, Headers2 with invalid data
-        let headers2 = dashcore::network::message_headers2::Headers2Message {
-            headers: vec![], // Empty headers might cause validation error
-        };
-        let message = Message::new(test_socket_address(1), NetworkMessage::Headers2(headers2));
-
-        // Handle the message - error should be propagated
-        let result = handler.handle_network_message(&message).await;
-        // The result depends on sync manager validation
-        assert!(result.is_ok() || result.is_err());
-    }
-}
diff --git a/dash-spv/src/client/mod.rs b/dash-spv/src/client/mod.rs
index ba5473a6d..755354f7c 100644
--- a/dash-spv/src/client/mod.rs
+++ b/dash-spv/src/client/mod.rs
@@ -1,42 +1,33 @@
 //! High-level client API for the Dash SPV client.
 //!
-//! This module has been refactored from a monolithic 2,851-line file into focused submodules:
+//! Provides `DashSpvClient`, the main entry point for SPV operations including
+//! sync orchestration, mempool tracking, peer/masternode queries, and transaction
+//! broadcasting.
 //!
 //! ## Module Structure
 //!
-//! - `core.rs` - Core DashSpvClient struct definition and simple accessors
+//! - `config.rs` - Client configuration
+//! - `core.rs` - Core `DashSpvClient` struct definition and simple accessors
 //! - `lifecycle.rs` - Client lifecycle (new, start, stop, shutdown)
 //! - `events.rs` - Event emission and progress tracking receivers
 //! - `mempool.rs` - Mempool tracking and coordination
 //! - `queries.rs` - Peer, masternode, and balance queries
 //! - `transactions.rs` - Transaction operations (e.g., broadcast)
-//! - `chainlock.rs` - ChainLock and InstantLock processing
-//! - `sync_coordinator.rs` - Sync orchestration and network monitoring (the largest module)
+//! - `sync_coordinator.rs` - Sync orchestration and network monitoring
+//! - `interface.rs` - Client interface trait
 //!
-//! ## Already Extracted Modules
+//! ## Lock Ordering
 //!
-//! - `config.rs` (484 lines) - Client configuration
-//! - `message_handler.rs` (585 lines) - Network message handling
-//! - `status_display.rs` (242 lines) - Status display formatting
-//!
-//! ## Lock Ordering (CRITICAL - Prevents Deadlocks)
-//!
-//! When acquiring multiple locks, ALWAYS use this order:
+//! When acquiring multiple locks, always use this order:
 //! 1. running (`Arc<RwLock<bool>>`)
-//! 2. state (`Arc<RwLock<ChainState>>`)
-//! 3. mempool_state (`Arc<RwLock<MempoolState>>`)
-//! 4. storage (`Arc<Mutex<S>>`)
+//! 2. mempool_state (`Arc<RwLock<MempoolState>>`)
+//! 3. storage (`Arc<Mutex<S>>`)
 //!
 //! Never acquire locks in reverse order or deadlock will occur!
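The retained lock-ordering rule is easiest to check against a short sketch. The following is illustrative only and not part of the patch: it assumes the `tokio::sync` primitives named in the doc comment above and substitutes placeholder types for the real `MempoolState` and storage manager.

```rust
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};

// Placeholders standing in for the real MempoolState and StorageManager types.
struct MempoolState {
    tx_count: usize,
}
struct Storage;

struct Client {
    running: Arc<RwLock<bool>>,               // lock 1
    mempool_state: Arc<RwLock<MempoolState>>, // lock 2
    storage: Arc<Mutex<Storage>>,             // lock 3
}

impl Client {
    /// Acquires locks strictly in the documented order:
    /// running -> mempool_state -> storage.
    async fn flush_mempool(&self) -> Option<usize> {
        let running = self.running.read().await; // 1. running
        if !*running {
            return None;
        }
        let mempool = self.mempool_state.read().await; // 2. mempool_state
        let _storage = self.storage.lock().await; // 3. storage
        // ... persist mempool contents to storage here ...
        Some(mempool.tx_count)
    }
}

#[tokio::main]
async fn main() {
    let client = Client {
        running: Arc::new(RwLock::new(true)),
        mempool_state: Arc::new(RwLock::new(MempoolState { tx_count: 3 })),
        storage: Arc::new(Mutex::new(Storage)),
    };
    assert_eq!(client.flush_mempool().await, Some(3));
}
```

Because every task takes `running` first, a shutdown check can never deadlock against a task that already holds the mempool or storage locks.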
-// Existing extracted modules pub mod config; pub mod interface; -pub mod message_handler; -pub mod status_display; -// New refactored modules -mod chainlock; mod core; mod events; mod lifecycle; @@ -47,8 +38,6 @@ mod transactions; // Re-export public types from extracted modules pub use config::ClientConfig; -pub use message_handler::MessageHandler; -pub use status_display::StatusDisplay; // Re-export the main client struct pub use core::DashSpvClient; @@ -56,9 +45,6 @@ pub use core::DashSpvClient; #[cfg(test)] mod config_test; -#[cfg(test)] -mod message_handler_test; - #[cfg(test)] mod tests { use super::{ClientConfig, DashSpvClient}; diff --git a/dash-spv/src/client/queries.rs b/dash-spv/src/client/queries.rs index 7cd8f9979..9289467c1 100644 --- a/dash-spv/src/client/queries.rs +++ b/dash-spv/src/client/queries.rs @@ -9,7 +9,6 @@ use crate::error::{Result, SpvError}; use crate::network::NetworkManager; use crate::storage::StorageManager; -use crate::types::AddressBalance; use dashcore::sml::llmq_type::LLMQType; use dashcore::sml::masternode_list_engine::MasternodeListEngine; use dashcore::sml::quorum_entry::qualified_quorum_entry::QualifiedQuorumEntry; @@ -112,41 +111,4 @@ impl DashSpvClient Result { - // This method requires wallet-specific functionality not in WalletInterface - // The wallet should expose balance info through its own interface - Err(SpvError::Config( - "Address balance queries should be made directly to the wallet implementation" - .to_string(), - )) - } - - /// Get balances for all watched addresses. - /// - /// This method is deprecated - use the wallet's balance query methods instead. - pub async fn get_all_balances( - &self, - ) -> Result> { - // TODO: Get balances from wallet instead of tracking separately - // Will be implemented when wallet integration is complete - Ok(std::collections::HashMap::new()) - } - - // ============ Filter Queries ============ - - /// Check if filter sync is available (any peer supports compact filters). - pub async fn is_filter_sync_available(&self) -> bool { - self.network - .has_peer_with_service(dashcore::network::constants::ServiceFlags::COMPACT_FILTERS) - .await - } } diff --git a/dash-spv/src/client/status_display.rs b/dash-spv/src/client/status_display.rs deleted file mode 100644 index 9efb65f13..000000000 --- a/dash-spv/src/client/status_display.rs +++ /dev/null @@ -1,266 +0,0 @@ -//! Status display and progress reporting for the Dash SPV client. - -use std::sync::Arc; -use tokio::sync::{Mutex, RwLock}; - -use crate::client::ClientConfig; -use crate::error::Result; -use crate::storage::StorageManager; -#[cfg(feature = "terminal-ui")] -use crate::terminal::TerminalUI; -use crate::types::{ChainState, SyncProgress}; -use key_wallet_manager::wallet_interface::WalletInterface; - -/// Status display manager for updating UI and reporting sync progress. -pub struct StatusDisplay<'a, S: StorageManager, W: WalletInterface> { - state: &'a Arc>, - storage: Arc>, - wallet: Option<&'a Arc>>, - #[cfg(feature = "terminal-ui")] - terminal_ui: &'a Option>, - #[allow(dead_code)] - config: &'a ClientConfig, -} - -impl<'a, S: StorageManager, W: WalletInterface> StatusDisplay<'a, S, W> { - /// Create a new status display manager. 
- #[cfg(feature = "terminal-ui")] - pub fn new( - state: &'a Arc>, - storage: Arc>, - wallet: Option<&'a Arc>>, - terminal_ui: &'a Option>, - config: &'a ClientConfig, - ) -> Self { - Self { - state, - storage, - wallet, - terminal_ui, - config, - } - } - - /// Create a new status display manager (without terminal UI support). - #[cfg(not(feature = "terminal-ui"))] - pub fn new( - state: &'a Arc>, - storage: Arc>, - wallet: Option<&'a Arc>>, - _terminal_ui: &'a Option<()>, - config: &'a ClientConfig, - ) -> Self { - Self { - state, - storage, - wallet, - config, - } - } - - /// Calculate the header height based on the current state and storage. - /// This handles both checkpoint sync and normal sync scenarios. - async fn calculate_header_height_with_logging( - &self, - state: &ChainState, - with_logging: bool, - ) -> u32 { - // Unified formula for both checkpoint and genesis sync: - // For genesis sync: sync_base_height = 0, so height = 0 + storage_count - // For checkpoint sync: height = checkpoint_height + storage_count - let storage = self.storage.lock().await; - if let Some(storage_tip) = storage.get_tip_height().await { - let blockchain_height = storage_tip; - if with_logging { - tracing::debug!( - "Status display: reported tip height={}, sync_base={}, raw_storage_tip={}", - blockchain_height, - state.sync_base_height, - storage_tip - ); - } - blockchain_height - } else { - // No headers in storage yet - state.sync_base_height - } - } - - /// Calculate the header height based on the current state and storage. - /// This handles both checkpoint sync and normal sync scenarios. - async fn calculate_header_height(&self, state: &ChainState) -> u32 { - self.calculate_header_height_with_logging(state, false).await - } - - /// Get current sync progress. - pub async fn sync_progress(&self) -> Result { - let state = self.state.read().await; - - // Calculate the actual header height considering checkpoint sync - let header_height = self.calculate_header_height(&state).await; - - // Get filter header height from storage - let storage = self.storage.lock().await; - let filter_header_height = - storage.get_filter_tip_height().await.ok().flatten().unwrap_or(0); - drop(storage); - - Ok(SyncProgress { - header_height, - filter_header_height, - masternode_height: state.last_masternode_diff_height.unwrap_or(0), - peer_count: 1, // TODO: Get from network manager - filter_sync_available: false, // TODO: Get from network manager - filters_downloaded: 0, - last_synced_filter_height: None, - sync_start: std::time::SystemTime::now(), // TODO: Track properly - last_update: std::time::SystemTime::now(), - }) - } - - /// Get current chain state (read-only). - pub async fn chain_state(&self) -> ChainState { - let state = self.state.read().await; - state.clone() - } - - /// Helper to try to get wallet balance if W implements Any. - /// This is a wrapper that handles the case where W might not implement Any. 
- fn try_get_balance_if_any(wallet: &W) -> Option - where - W: 'static, - { - // Try to use Any trait for downcasting - // We check if W is WalletManager using TypeId - use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; - use key_wallet_manager::wallet_manager::WalletManager; - use std::any::TypeId; - - // Check if W is WalletManager - let wallet_type_id = TypeId::of::(); - let wallet_manager_type_id = TypeId::of::>(); - - if wallet_type_id == wallet_manager_type_id { - // Unsafe downcast: we've verified the types match, so this is safe - unsafe { - let wallet_ptr = wallet as *const W as *const WalletManager; - let wallet_ref = &*wallet_ptr; - return Some(wallet_ref.get_total_balance()); - } - } - - None - } - - /// Format balance in DASH with 8 decimal places. - fn format_balance(satoshis: u64) -> String { - use dashcore::Amount; - use dashcore::Denomination; - let amount = Amount::from_sat(satoshis); - amount.to_string_with_denomination(Denomination::Dash) - } - - /// Update the status display. - pub async fn update_status_display(&self) { - #[cfg(feature = "terminal-ui")] - { - if let Some(ui) = self.terminal_ui { - // Get header height - when syncing from checkpoint, use the actual blockchain height - let header_height = { - let state = self.state.read().await; - self.calculate_header_height_with_logging(&state, true).await - }; - - // Get filter header height from storage - let storage = self.storage.lock().await; - let filter_height = - storage.get_filter_tip_height().await.ok().flatten().unwrap_or(0); - drop(storage); - - // Get latest chainlock height from state - let chainlock_height = { - let state = self.state.read().await; - state.last_chainlock_height - }; - - // Get latest chainlock height from storage metadata (in case state wasn't updated) - let stored_chainlock_height = { - let storage = self.storage.lock().await; - if let Ok(Some(data)) = storage.load_metadata("latest_chainlock_height").await { - if data.len() >= 4 { - Some(u32::from_le_bytes([data[0], data[1], data[2], data[3]])) - } else { - None - } - } else { - None - } - }; - - // Use the higher of the two chainlock heights - let latest_chainlock = match (chainlock_height, stored_chainlock_height) { - (Some(a), Some(b)) => Some(a.max(b)), - (Some(a), None) => Some(a), - (None, Some(b)) => Some(b), - (None, None) => None, - }; - - // Update terminal UI - let _ = ui - .update_status(|status| { - status.headers = header_height; - status.filter_headers = filter_height; - status.chainlock_height = latest_chainlock; - status.peer_count = 1; // TODO: Get actual peer count - status.network = format!("{:?}", self.config.network); - }) - .await; - return; - } - } - - { - // Fall back to simple logging if terminal UI is not enabled - // Get header height - when syncing from checkpoint, use the actual blockchain height - let header_height = { - let state = self.state.read().await; - self.calculate_header_height_with_logging(&state, true).await - }; - - // Get filter header height from storage - let storage = self.storage.lock().await; - let filter_height = storage.get_filter_tip_height().await.ok().flatten().unwrap_or(0); - drop(storage); - - let chainlock_height = { - let state = self.state.read().await; - state.last_chainlock_height.unwrap_or(0) - }; - - // Get wallet balance if available - let balance_str = if let Some(wallet_ref) = self.wallet { - let wallet_guard = wallet_ref.read().await; - // Try to get balance if W implements Any (for WalletManager support) - // We use a helper that requires W: Any, so we 
need to handle this carefully - // For now, we'll attempt to get balance only if possible - Self::try_get_balance_if_any(&*wallet_guard) - .map(|balance_sat| format!(" | Balance: {}", Self::format_balance(balance_sat))) - .unwrap_or_default() - } else { - String::new() - }; - - tracing::info!( - "📊 [SYNC STATUS] Headers: {} | Filter Headers: {} | Latest ChainLock: {} | {}", - header_height, - filter_height, - if chainlock_height > 0 { - format!("#{}", chainlock_height) - } else { - "None".to_string() - }, - balance_str - ); - } - } -} diff --git a/dash-spv/src/client/sync_coordinator.rs b/dash-spv/src/client/sync_coordinator.rs index 43ab132c5..5a97fb66c 100644 --- a/dash-spv/src/client/sync_coordinator.rs +++ b/dash-spv/src/client/sync_coordinator.rs @@ -130,31 +130,4 @@ impl DashSpvClient, - block_height: u32, - ) -> Result<()> { - tracing::info!("💰 Balance changes detected in block at height {}:", block_height); - - for (address, change_sat) in balance_changes { - if *change_sat != 0 { - let change_amount = dashcore::Amount::from_sat(change_sat.unsigned_abs()); - let sign = if *change_sat > 0 { - "+" - } else { - "-" - }; - tracing::info!(" 📍 Address {}: {}{}", address, sign, change_amount); - } - } - - // TODO: Get monitored addresses from wallet and report balances - // Will be implemented when wallet integration is complete - - Ok(()) - } } diff --git a/dash-spv/src/lib.rs b/dash-spv/src/lib.rs index c82cb97ba..f76929247 100644 --- a/dash-spv/src/lib.rs +++ b/dash-spv/src/lib.rs @@ -66,8 +66,6 @@ pub mod mempool_filter; pub mod network; pub mod storage; pub mod sync; -#[cfg(feature = "terminal-ui")] -pub mod terminal; pub mod types; pub mod validation; @@ -78,7 +76,7 @@ pub use error::{ }; pub use logging::{init_console_logging, init_logging, LogFileConfig, LoggingConfig, LoggingGuard}; pub use tracing::level_filters::LevelFilter; -pub use types::{ChainState, FilterMatch, SyncProgress, ValidationMode}; +pub use types::{FilterMatch, ValidationMode}; // Re-export commonly used dashcore types pub use dashcore::{Address, BlockHash, Network, OutPoint, QuorumHash, ScriptBuf}; diff --git a/dash-spv/src/main.rs b/dash-spv/src/main.rs index 22bc58258..299610933 100644 --- a/dash-spv/src/main.rs +++ b/dash-spv/src/main.rs @@ -6,7 +6,6 @@ use std::process; use std::sync::Arc; use clap::{Arg, Command}; -use dash_spv::terminal::TerminalGuard; use dash_spv::{ClientConfig, DashSpvClient, LevelFilter, Network}; use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; use key_wallet_manager::wallet_manager::WalletManager; @@ -99,26 +98,6 @@ async fn run() -> Result<(), Box> { .value_parser(["none", "basic", "full"]) .default_value("full"), ) - .arg( - Arg::new("watch-address") - .short('w') - .long("watch-address") - .value_name("ADDRESS") - .help("Dash address to watch for transactions (can be used multiple times)") - .action(clap::ArgAction::Append), - ) - .arg( - Arg::new("add-example-addresses") - .long("add-example-addresses") - .help("Add some example Dash addresses to watch for testing") - .action(clap::ArgAction::SetTrue), - ) - .arg( - Arg::new("terminal-ui") - .long("terminal-ui") - .help("Enable terminal UI status bar") - .action(clap::ArgAction::SetTrue), - ) .arg( Arg::new("start-height") .long("start-height") @@ -210,15 +189,13 @@ async fn run() -> Result<(), Box> { // Parse logging flags and initialize logging early let no_log_file = matches.get_flag("no-log-file"); let print_to_console = matches.get_flag("print-to-console"); - let enable_terminal_ui = 
matches.get_flag("terminal-ui"); let max_log_files = *matches.get_one::("max-log-files").unwrap(); let log_dir = matches .get_one::("log-dir") .map(PathBuf::from) .unwrap_or_else(|| data_dir.join("logs")); - // When terminal UI is enabled, force file logging and disable console to avoid mixing - let file_config = if !no_log_file || enable_terminal_ui { + let file_config = if !no_log_file { Some(dash_spv::LogFileConfig { log_dir, max_files: max_log_files, @@ -227,12 +204,7 @@ async fn run() -> Result<(), Box> { None }; - // Disable console logging when terminal UI is enabled - let console_enabled = if enable_terminal_ui { - false - } else { - no_log_file || print_to_console - }; + let console_enabled = no_log_file || print_to_console; let logging_config = dash_spv::LoggingConfig { level: Some(log_level), @@ -301,7 +273,7 @@ async fn run() -> Result<(), Box> { // Create the wallet manager let mut wallet_manager = WalletManager::::new(config.network); - let wallet_id = wallet_manager.create_wallet_from_mnemonic( + wallet_manager.create_wallet_from_mnemonic( mnemonic_phrase.as_str(), "", 0, @@ -325,16 +297,7 @@ async fn run() -> Result<(), Box> { process::exit(1); } }; - run_client( - config, - network_manager, - storage_manager, - wallet, - enable_terminal_ui, - &matches, - wallet_id, - ) - .await?; + run_client(config, network_manager, storage_manager, wallet).await?; Ok(()) } @@ -344,9 +307,6 @@ async fn run_client( network_manager: dash_spv::network::manager::PeerNetworkManager, storage_manager: S, wallet: Arc>>, - enable_terminal_ui: bool, - matches: &clap::ArgMatches, - wallet_id: [u8; 32], ) -> Result<(), Box> { // Create and start the client let mut client = @@ -364,37 +324,6 @@ async fn run_client( } }; - // Enable terminal UI in the client if requested - let _terminal_guard = if enable_terminal_ui { - client.enable_terminal_ui(); - - // Get the terminal UI from the client and initialize it - if let Some(ui) = client.get_terminal_ui() { - match TerminalGuard::new(ui.clone()) { - Ok(guard) => { - // Initial update with network info - let network_name = format!("{:?}", config.network); - let _ = ui - .update_status(|status| { - status.network = network_name; - status.peer_count = 0; // Will be updated when connected - }) - .await; - - Some(guard) - } - Err(e) => { - tracing::warn!("Failed to initialize terminal UI: {}", e); - None - } - } - } else { - None - } - } else { - None - }; - if let Err(e) = client.start().await { eprintln!("Failed to start SPV client: {}", e); process::exit(1); @@ -402,215 +331,6 @@ async fn run_client( tracing::info!("SPV client started successfully"); - // Set up event logging: count detected transactions and log wallet balances periodically - // Take the client's event receiver and spawn a logger task - if let Some(mut event_rx) = client.take_event_receiver() { - let wallet_for_logger = wallet.clone(); - let wallet_id_for_logger = wallet_id; - tokio::spawn(async move { - use dash_spv::types::SpvEvent; - let mut total_detected_block_txs: u64 = 0; - let mut total_detected_mempool_txs: u64 = 0; - let mut last_snapshot = std::time::Instant::now(); - let snapshot_interval = std::time::Duration::from_secs(10); - - loop { - tokio::select! { - maybe_event = event_rx.recv() => { - match maybe_event { - Some(SpvEvent::BlockProcessed { relevant_transactions, .. 
}) => { - if relevant_transactions > 0 { - total_detected_block_txs = total_detected_block_txs.saturating_add(relevant_transactions as u64); - tracing::info!( - "Detected {} wallet-relevant tx(s) in block; cumulative (blocks): {}", - relevant_transactions, - total_detected_block_txs - ); - } - } - Some(SpvEvent::MempoolTransactionAdded { .. }) => { - total_detected_mempool_txs = total_detected_mempool_txs.saturating_add(1); - tracing::info!( - "Detected wallet-relevant mempool tx; cumulative (mempool): {}", - total_detected_mempool_txs - ); - } - Some(_) => { /* ignore other events */ } - None => break, // sender closed - } - } - // Also do a periodic snapshot while events are flowing - _ = tokio::time::sleep(snapshot_interval) => { - // Log snapshot if interval has elapsed - if last_snapshot.elapsed() >= snapshot_interval { - let (tx_count, wallet_balance) = { - let mgr = wallet_for_logger.read().await; - - // Count wallet-affecting transactions from wallet transaction history - let tx_count = mgr - .wallet_transaction_history(&wallet_id_for_logger) - .map(|v| v.len()) - .unwrap_or(0); - - // Read wallet balance from the managed wallet info - let wallet_balance = mgr.get_wallet_balance(&wallet_id_for_logger).unwrap_or_default(); - - (tx_count, wallet_balance) - }; - tracing::info!( - "Wallet tx summary: tx_count={} (blocks={} + mempool={}), balances: {}", - tx_count, - total_detected_block_txs, - total_detected_mempool_txs, - wallet_balance, - ); - last_snapshot = std::time::Instant::now(); - } - } - } - } - }); - } else { - tracing::warn!("Event channel not available; transaction/balance logging disabled"); - } - - // Add watch addresses if specified - if let Some(addresses) = matches.get_many::("watch-address") { - for addr_str in addresses { - match addr_str.parse::>() { - Ok(addr) => { - let network = config.network; - let checked_addr = addr.require_network(network).map_err(|_| { - format!("Address '{}' is not valid for network {:?}", addr_str, network) - }); - match checked_addr { - Ok(valid_addr) => { - // TODO: Add address to wallet for monitoring - // For now, just log that we would watch this address - tracing::info!( - "Would watch address: {} (wallet integration pending)", - valid_addr - ); - } - Err(e) => { - tracing::error!("Invalid address for network: {}", e); - } - } - } - Err(e) => { - tracing::error!("Invalid address format '{}': {}", addr_str, e); - } - } - } - } - - // Add example addresses for testing if requested - if matches.get_flag("add-example-addresses") { - let network = config.network; - let example_addresses = match network { - dashcore::Network::Dash => vec![ - // Some example mainnet addresses (these are from block explorers/faucets) - "Xesjop7V9xLndFMgZoCrckJ5ZPgJdJFbA3", // Crowdnode - ], - dashcore::Network::Testnet => vec![ - // Testnet addresses - "yNEr8u4Kx8PTH9A9G3P7NwkJRmqFD7tKSj", // Example testnet address - "yMGqjKTqr2HKKV6zqSg5vTPQUzJNt72h8h", // Another testnet example - ], - dashcore::Network::Regtest => vec![ - // Regtest addresses (these would be from local testing) - "yQ9J8qK3nNW8JL8h5T6tB3VZwwH9h5T6tB", // Example regtest address - "yeRZBWYfeNE4yVUHV4ZLs83Ppn9aMRH57A", // Another regtest example - ], - _ => vec![], - }; - - for addr_str in example_addresses { - match addr_str.parse::>() { - Ok(addr) => { - if let Ok(_valid_addr) = addr.require_network(network) { - // TODO: In the future, we could add these example addresses to the wallet - // For now, just log that we would monitor them - let height_info = if network == 
dashcore::Network::Dash - && addr_str == "Xesjop7V9xLndFMgZoCrckJ5ZPgJdJFbA3" - { - " (from height 200,000)" - } else { - "" - }; - tracing::info!( - "Would monitor example address: {}{}", - addr_str, - height_info - ); - } - } - Err(e) => { - tracing::warn!("Example address '{}' failed to parse: {}", addr_str, e); - } - } - } - } - - // Display current wallet addresses - { - let wallet_lock = wallet.read().await; - let monitored = wallet_lock.monitored_addresses(); - if !monitored.is_empty() { - tracing::info!("Wallet monitoring {} addresses:", monitored.len()); - for (i, addr) in monitored.iter().take(10).enumerate() { - tracing::info!(" {}: {}", i + 1, addr); - } - if monitored.len() > 10 { - tracing::info!(" ... and {} more addresses", monitored.len() - 10); - } - } else { - tracing::info!("No addresses being monitored by wallet. The wallet will generate addresses as needed."); - } - } - - // Wait for at least one peer to connect before attempting sync - tracing::info!("Waiting for peers to connect..."); - let mut wait_time = 0; - const MAX_WAIT_TIME: u64 = 60; // Wait up to 60 seconds for peers - - loop { - let peer_count = client.get_peer_count().await; - if peer_count > 0 { - tracing::info!("Connected to {} peer(s), starting synchronization", peer_count); - break; - } - - if wait_time >= MAX_WAIT_TIME { - tracing::error!("No peers connected after {} seconds", MAX_WAIT_TIME); - return Err("SPV client failed to connect to any peers".into()); - } - - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - wait_time += 1; - - if wait_time % 5 == 0 { - tracing::info!("Still waiting for peers... ({}s elapsed)", wait_time); - } - } - - // Check filters for matches if wallet has addresses before starting monitoring - let should_check_filters = { - let wallet_lock = wallet.read().await; - let monitored = wallet_lock.monitored_addresses(); - !monitored.is_empty() && !matches.get_flag("no-filters") - }; - - // Start monitoring immediately after sync requests are sent - tracing::info!("Starting network monitoring..."); - - // For now, just focus on the core fix - getting headers to sync properly - // Filter checking can be done manually later - if should_check_filters { - tracing::info!("Filter checking will be available after headers sync completes"); - tracing::info!("You can manually trigger filter sync later if needed"); - } - let (_command_sender, command_receiver) = tokio::sync::mpsc::unbounded_channel(); let shutdown_token = CancellationToken::new(); diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs deleted file mode 100644 index c6c3b69af..000000000 --- a/dash-spv/src/storage/chainstate.rs +++ /dev/null @@ -1,101 +0,0 @@ -use std::path::PathBuf; - -use async_trait::async_trait; - -use crate::{ - error::StorageResult, - storage::{io::atomic_write, PersistentStorage}, - ChainState, -}; - -#[async_trait] -pub trait ChainStateStorage { - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; - - async fn load_chain_state(&self) -> StorageResult>; -} - -pub struct PersistentChainStateStorage { - storage_path: PathBuf, -} - -impl PersistentChainStateStorage { - const FOLDER_NAME: &str = "chainstate"; - const FILE_NAME: &str = "chainstate.json"; -} - -#[async_trait] -impl PersistentStorage for PersistentChainStateStorage { - async fn open(storage_path: impl Into + Send) -> StorageResult { - Ok(PersistentChainStateStorage { - storage_path: storage_path.into(), - }) - } - - async fn persist(&mut self, _storage_path: impl Into + 
Send) -> StorageResult<()> {
-        // The current implementation persists data every time it is stored
-        Ok(())
-    }
-}
-
-#[async_trait]
-impl ChainStateStorage for PersistentChainStateStorage {
-    async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> {
-        let state_data = serde_json::json!({
-            "last_chainlock_height": state.last_chainlock_height,
-            "last_chainlock_hash": state.last_chainlock_hash,
-            "current_filter_tip": state.current_filter_tip,
-            "last_masternode_diff_height": state.last_masternode_diff_height,
-            "sync_base_height": state.sync_base_height,
-        });
-
-        let chainstate_folder = self.storage_path.join(Self::FOLDER_NAME);
-        let path = chainstate_folder.join(Self::FILE_NAME);
-
-        tokio::fs::create_dir_all(chainstate_folder).await?;
-
-        let json = state_data.to_string();
-        atomic_write(&path, json.as_bytes()).await?;
-
-        Ok(())
-    }
-
-    async fn load_chain_state(&self) -> StorageResult<Option<ChainState>> {
-        let path = self.storage_path.join(Self::FOLDER_NAME).join(Self::FILE_NAME);
-        if !path.exists() {
-            return Ok(None);
-        }
-
-        let content = tokio::fs::read_to_string(path).await?;
-        let value: serde_json::Value = serde_json::from_str(&content).map_err(|e| {
-            crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e))
-        })?;
-
-        let state = ChainState {
-            last_chainlock_height: value
-                .get("last_chainlock_height")
-                .and_then(|v| v.as_u64())
-                .map(|h| h as u32),
-            last_chainlock_hash: value
-                .get("last_chainlock_hash")
-                .and_then(|v| v.as_str())
-                .and_then(|s| s.parse().ok()),
-            current_filter_tip: value
-                .get("current_filter_tip")
-                .and_then(|v| v.as_str())
-                .and_then(|s| s.parse().ok()),
-            masternode_engine: None,
-            last_masternode_diff_height: value
-                .get("last_masternode_diff_height")
-                .and_then(|v| v.as_u64())
-                .map(|h| h as u32),
-            sync_base_height: value
-                .get("sync_base_height")
-                .and_then(|v| v.as_u64())
-                .map(|h| h as u32)
-                .unwrap_or(0),
-        };
-
-        Ok(Some(state))
-    }
-}
diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs
index c967ff58d..6138bad66 100644
--- a/dash-spv/src/storage/mod.rs
+++ b/dash-spv/src/storage/mod.rs
@@ -4,7 +4,6 @@ pub mod types;
 
 mod block_headers;
 mod blocks;
-mod chainstate;
 mod filter_headers;
 mod filters;
 mod io;
@@ -26,18 +25,16 @@ use std::time::Duration;
 use tokio::sync::RwLock;
 
 use crate::error::StorageResult;
-use crate::storage::chainstate::PersistentChainStateStorage;
 use crate::storage::lockfile::LockFile;
 use crate::storage::metadata::PersistentMetadataStorage;
 use crate::storage::transactions::PersistentTransactionStorage;
 use crate::types::{HashedBlock, HashedBlockHeader, MempoolState, UnconfirmedTransaction};
-use crate::{ChainState, ClientConfig};
+use crate::ClientConfig;
 
 pub use crate::storage::block_headers::{
     BlockHeaderStorage, BlockHeaderTip, PersistentBlockHeaderStorage,
 };
 pub use crate::storage::blocks::{BlockStorage, PersistentBlockStorage};
-pub use crate::storage::chainstate::ChainStateStorage;
 pub use crate::storage::filter_headers::{FilterHeaderStorage, PersistentFilterHeaderStorage};
 pub use crate::storage::filters::{FilterStorage, PersistentFilterStorage};
 pub use crate::storage::masternode::{MasternodeStateStorage, PersistentMasternodeStateStorage};
@@ -64,7 +61,6 @@ pub trait StorageManager:
     + BlockStorage
     + TransactionStorage
     + MetadataStorage
-    + ChainStateStorage
     + MasternodeStateStorage
     + Send
     + Sync
@@ -109,7 +105,6 @@ pub struct DiskStorageManager {
     blocks: Arc<RwLock<PersistentBlockStorage>>,
     transactions: Arc<RwLock<PersistentTransactionStorage>>,
     metadata: Arc<RwLock<PersistentMetadataStorage>>,
-    chainstate: Arc<RwLock<PersistentChainStateStorage>>,
     masternodestate: Arc<RwLock<PersistentMasternodeStateStorage>>,
 
     //
diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index c967ff58d..6138bad66 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -4,7 +4,6 @@ pub mod types; mod block_headers; mod blocks; -mod chainstate; mod filter_headers; mod filters; mod io; @@ -26,18 +25,16 @@ use std::time::Duration; use tokio::sync::RwLock; use crate::error::StorageResult; -use crate::storage::chainstate::PersistentChainStateStorage; use crate::storage::lockfile::LockFile; use crate::storage::metadata::PersistentMetadataStorage; use crate::storage::transactions::PersistentTransactionStorage; use crate::types::{HashedBlock, HashedBlockHeader, MempoolState, UnconfirmedTransaction}; -use crate::{ChainState, ClientConfig}; +use crate::ClientConfig; pub use crate::storage::block_headers::{ BlockHeaderStorage, BlockHeaderTip, PersistentBlockHeaderStorage, }; pub use crate::storage::blocks::{BlockStorage, PersistentBlockStorage}; -pub use crate::storage::chainstate::ChainStateStorage; pub use crate::storage::filter_headers::{FilterHeaderStorage, PersistentFilterHeaderStorage}; pub use crate::storage::filters::{FilterStorage, PersistentFilterStorage}; pub use crate::storage::masternode::{MasternodeStateStorage, PersistentMasternodeStateStorage}; @@ -64,7 +61,6 @@ pub trait StorageManager: + BlockStorage + TransactionStorage + MetadataStorage - + ChainStateStorage + MasternodeStateStorage + Send + Sync @@ -109,7 +105,6 @@ pub struct DiskStorageManager { blocks: Arc<RwLock<PersistentBlockStorage>>, transactions: Arc<RwLock<PersistentTransactionStorage>>, metadata: Arc<RwLock<PersistentMetadataStorage>>, - chainstate: Arc<RwLock<PersistentChainStateStorage>>, masternodestate: Arc<RwLock<PersistentMasternodeStateStorage>>, // Background worker @@ -148,9 +143,6 @@ impl DiskStorageManager { PersistentTransactionStorage::open(&storage_path).await?, )), metadata: Arc::new(RwLock::new(PersistentMetadataStorage::open(&storage_path).await?)), - chainstate: Arc::new(RwLock::new( - PersistentChainStateStorage::open(&storage_path).await?, - )), masternodestate: Arc::new(RwLock::new( - PersistentMasternodeStateStorage::open(&storage_path).await?, - )), @@ -181,7 +173,6 @@ impl DiskStorageManager { let blocks = Arc::clone(&self.blocks); let transactions = Arc::clone(&self.transactions); let metadata = Arc::clone(&self.metadata); - let chainstate = Arc::clone(&self.chainstate); let masternodestate = Arc::clone(&self.masternodestate); let storage_path = self.storage_path.clone(); @@ -198,7 +189,6 @@ impl DiskStorageManager { let _ = blocks.write().await.persist(&storage_path).await; let _ = transactions.write().await.persist(&storage_path).await; let _ = metadata.write().await.persist(&storage_path).await; - let _ = chainstate.write().await.persist(&storage_path).await; let _ = masternodestate.write().await.persist(&storage_path).await; } }); @@ -257,7 +247,6 @@ impl DiskStorageManager { let _ = self.blocks.write().await.persist(storage_path).await; let _ = self.transactions.write().await.persist(storage_path).await; let _ = self.metadata.write().await.persist(storage_path).await; - let _ = self.chainstate.write().await.persist(storage_path).await; let _ = self.masternodestate.write().await.persist(storage_path).await; } } @@ -297,8 +286,6 @@ impl StorageManager for DiskStorageManager { self.transactions = Arc::new(RwLock::new(PersistentTransactionStorage::open(storage_path).await?)); self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::open(storage_path).await?)); - self.chainstate = - Arc::new(RwLock::new(PersistentChainStateStorage::open(storage_path).await?)); self.masternodestate = Arc::new(RwLock::new(PersistentMasternodeStateStorage::open(storage_path).await?)); @@ -486,17 +473,6 @@ impl metadata::MetadataStorage for DiskStorageManager { } } -#[async_trait] -impl chainstate::ChainStateStorage for DiskStorageManager { - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - self.chainstate.write().await.store_chain_state(state).await - } - - async fn load_chain_state(&self) -> StorageResult<Option<ChainState>> { - self.chainstate.read().await.load_chain_state().await - } -} - #[async_trait] impl masternode::MasternodeStateStorage for DiskStorageManager { async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> {
diff --git a/dash-spv/src/sync/legacy/filters/download.rs b/dash-spv/src/sync/legacy/filters/download.rs deleted file mode 100644 index 87deec830..000000000 --- a/dash-spv/src/sync/legacy/filters/download.rs +++ /dev/null @@ -1,409 +0,0 @@ -//! CFilter download and verification logic. -//! -//! This module handles downloading individual compact block filters and verifying -//! them against their corresponding filter headers. -//! -//! ## Key Features -//! -//! - Filter request queue management -//! - Parallel filter downloads with concurrency limits -//! - Filter verification against CFHeaders -//! - Individual filter header downloads for blocks -//! - Progress tracking and gap detection - -use dashcore::{ - bip158::BlockFilter, network::message::NetworkMessage, network::message_filter::GetCFilters, - BlockHash, -}; - -use crate::error::{SyncError, SyncResult}; - use crate::network::NetworkManager; -use crate::storage::StorageManager; -use crate::types::SyncProgress; - -impl<S: StorageManager, N: NetworkManager> super::manager::FilterSyncManager<S, N> { - pub async fn verify_cfilter_against_headers( - &self, - filter_data: &[u8], - height: u32, - storage: &S, - ) -> SyncResult<bool> { - // We expect filter headers to be synced before requesting filters. - // If we're at height 0 (genesis), skip verification because there is no previous header. - if height == 0 { - tracing::debug!("Skipping cfilter verification at genesis height 0"); - return Ok(true); - } - - // Load previous and expected headers - let prev_header = storage.get_filter_header(height - 1).await.map_err(|e| { - SyncError::Storage(format!("Failed to load previous filter header: {}", e)) - })?; - let expected_header = storage.get_filter_header(height).await.map_err(|e| { - SyncError::Storage(format!("Failed to load expected filter header: {}", e)) - })?; - - let (Some(prev_header), Some(expected_header)) = (prev_header, expected_header) else { - tracing::warn!( - "Missing filter headers in storage for height {} (prev and/or expected)", - height - ); - return Ok(false); - }; - - // Compute the header from the received filter bytes and compare - let filter = BlockFilter::new(filter_data); - let computed_header = filter.filter_header(&prev_header); - - let matches = computed_header == expected_header; - if !matches { - tracing::error!( - "CFilter header mismatch at height {}: computed={:?}, expected={:?}", - height, - computed_header, - expected_header - ); - } - - Ok(matches) - } - - pub async fn sync_filters( - &mut self, - network: &mut N, - storage: &mut S, - start_height: Option<u32>, - count: Option<u32>, - ) -> SyncResult<SyncProgress> { - if self.syncing_filters { - return Err(SyncError::SyncInProgress); - } - - self.syncing_filters = true; - - // Clear any stale state from previous attempts - self.clear_filter_sync_state(); - - // Build the queue of filter requests - self.build_filter_request_queue(storage, start_height, count).await?; - - // Start processing the queue - self.process_filter_request_queue(network, storage).await?; - - // Note: Actual completion will be tracked by the monitoring loop - // This method just queues up requests and starts the flow control process - tracing::info!( - "✅ Filter sync initiated ({} requests queued, {} active)", - self.pending_filter_requests.len(), - self.active_filter_requests.len() - ); - - // Don't set syncing_filters to false here - it should remain true during download - // It will be cleared when sync completes or fails - - Ok(SyncProgress { - filters_downloaded: 0, // Will be updated by monitoring loop - ..SyncProgress::default() - }) - } - - /// Mark a filter as received and check for batch completion. - pub async fn mark_filter_received( - &mut self, - block_hash: BlockHash, - storage: &S, - ) -> SyncResult<()> { - // Record the received filter - self.record_individual_filter_received(block_hash, storage).await?; - - // Check which active requests are now complete - let mut completed_requests = Vec::new(); - - for (start, end) in self.active_filter_requests.keys() { - if self.is_request_complete(*start, *end).await? { - completed_requests.push((*start, *end)); - } - } - - // Remove completed requests from active tracking - for range in &completed_requests { - self.active_filter_requests.remove(range); - tracing::debug!("✅ Filter request range {}-{} completed", range.0, range.1); - } - - // Log current state periodically - { - let guard = self.received_filter_heights.lock().await; - if guard.len() % 1000 == 0 { - tracing::info!( - "Filter sync state: {} filters received, {} active requests, {} pending requests", - guard.len(), - self.active_filter_requests.len(), - self.pending_filter_requests.len() - ); - } - } - - Ok(()) - } - - async fn is_request_complete(&self, start: u32, end: u32) -> SyncResult<bool> { - let received_heights = self.received_filter_heights.lock().await; - for height in start..=end { - if !received_heights.contains(&height) { - return Ok(false); - } - } - Ok(true) - } - - async fn record_individual_filter_received( - &mut self, - block_hash: BlockHash, - storage: &S, - ) -> SyncResult<()> { - // Look up height for the block hash - if let Some(height) = storage.get_header_height_by_hash(&block_hash).await.map_err(|e| { - SyncError::Storage(format!("Failed to get header height by hash: {}", e)) - })? { - // Record in received filter heights - let mut heights = self.received_filter_heights.lock().await; - heights.insert(height); - tracing::trace!( - "📊 Recorded filter received at height {} for block {}", - height, - block_hash - ); - } else { - tracing::warn!("Could not find height for filter block hash {}", block_hash); - } - - Ok(()) - } - - pub async fn request_filters( - &mut self, - network: &mut N, - start_height: u32, - stop_hash: BlockHash, - ) -> SyncResult<()> { - let get_cfilters = GetCFilters { - filter_type: 0, // Basic filter type - start_height, - stop_hash, - }; - - tracing::debug!( - "Sending GetCFilters: start_height={}, stop_hash={}", - start_height, - stop_hash - ); - - network - .send_message(NetworkMessage::GetCFilters(get_cfilters)) - .await - .map_err(|e| SyncError::Network(format!("Failed to send GetCFilters: {}", e)))?; - - tracing::trace!("Requested filters from height {} to {}", start_height, stop_hash); - - Ok(()) - } - - pub(super) async fn find_height_for_block_hash( - &self, - block_hash: &BlockHash, - storage: &S, - start_height: u32, - end_height: u32, - ) -> SyncResult<Option<u32>> { - // Use the efficient reverse index first. - // Contract: StorageManager::get_header_height_by_hash returns ABSOLUTE blockchain height. - if let Some(abs_height) = - storage.get_header_height_by_hash(block_hash).await.map_err(|e| { - SyncError::Storage(format!("Failed to get header height by hash: {}", e)) - })? - { - // Check if the absolute height is within the requested range - if abs_height >= start_height && abs_height <= end_height { - return Ok(Some(abs_height)); - } - } - - Ok(None) - } - - pub async fn store_filter_headers( - &mut self, - cfheaders: &dashcore::network::message_filter::CFHeaders, - storage: &mut S, - ) -> SyncResult<()> { - if cfheaders.filter_hashes.is_empty() { - tracing::debug!("No filter headers to store"); - return Ok(()); - } - - // Get the height range for this batch - let (start_height, stop_height, _header_tip_height) = - self.get_batch_height_range(cfheaders, storage).await?; - - tracing::info!( - "Received {} filter headers from height {} to {}", - cfheaders.filter_hashes.len(), - start_height, - stop_height - ); - - // Check current filter tip to see if we already have some/all of these headers - let current_filter_tip = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter tip: {}", e)))? - .unwrap_or(0); - - // If we already have all these filter headers, skip processing - if current_filter_tip >= stop_height { - tracing::info!( - "Already have filter headers up to height {} (received up to {}), skipping", - current_filter_tip, - stop_height - ); - return Ok(()); - } - - // If there's partial overlap, handle it carefully: the overlapping portion is - // verified for chain continuity, and if verification fails we skip storing to - // avoid corrupting the local filter header chain. - if current_filter_tip >= start_height && start_height > 0 { - tracing::info!( - "Received overlapping filter headers. Current tip: {}, received range: {}-{}", - current_filter_tip, - start_height, - stop_height - ); - - // Use the handle_overlapping_headers method which properly handles the chain continuity - let expected_start = current_filter_tip + 1; - - match self.handle_overlapping_headers(cfheaders, expected_start, storage).await { - Ok((stored_count, _)) => { - if stored_count > 0 { - tracing::info!("✅ Successfully handled overlapping filter headers"); - } else { - tracing::info!("All filter headers in batch already stored"); - } - } - Err(e) => { - // If we can't find the connection point, it might be from a different peer - // with a different view of the chain - tracing::warn!( - "Failed to handle overlapping filter headers: {}. This may be due to data from different peers.", - e - ); - return Ok(()); - } - } - } else { - // Process the filter headers to convert them to the proper format - match self.process_filter_headers(cfheaders, start_height, storage).await { - Ok(new_filter_headers) => { - if !new_filter_headers.is_empty() { - // If this is the first batch (starting at height 1), store the genesis filter header first - if start_height == 1 && current_filter_tip < 1 { - let genesis_header = vec![cfheaders.previous_filter_header]; - storage.store_filter_headers(&genesis_header).await.map_err(|e| { - SyncError::Storage(format!( - "Failed to store genesis filter header: {}", - e - )) - })?; - tracing::debug!( - "Stored genesis filter header at height 0: {:?}", - cfheaders.previous_filter_header - ); - } - - // If this is the first batch after a checkpoint, store the checkpoint filter header - if self.sync_base_height > 0 - && start_height == self.sync_base_height + 1 - && current_filter_tip < self.sync_base_height - { - // Store the previous_filter_header as the filter header for the checkpoint block - let checkpoint_header = vec![cfheaders.previous_filter_header]; - storage - .store_filter_headers_at_height( - &checkpoint_header, - self.sync_base_height, - ) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to store checkpoint filter header: {}", - e - )) - })?; - tracing::info!( - "Stored checkpoint filter header at height {}: {:?}", - self.sync_base_height, - cfheaders.previous_filter_header - ); - } - - // Store the new filter headers - storage.store_filter_headers(&new_filter_headers).await.map_err(|e| { - SyncError::Storage(format!("Failed to store filter headers: {}", e)) - })?; - - tracing::info!( - "✅ Successfully stored {} new filter headers", - new_filter_headers.len() - ); - } - } - Err(e) => { - // If verification failed, it might be from a peer with different data - tracing::warn!( - "Failed to process filter headers: {}. This may be due to data from different peers.", - e - ); - return Ok(()); - } - } - } - - Ok(()) - } - - pub async fn send_next_filter_batch(&mut self, network: &mut N) -> SyncResult<()> { - let available_slots = self.get_available_request_slots(); - let requests_to_send = available_slots.min(self.pending_filter_requests.len()); - - if requests_to_send > 0 { - tracing::debug!( - "Sending {} more filter requests ({} queued, {} active)", - requests_to_send, - self.pending_filter_requests.len() - requests_to_send, - self.active_filter_requests.len() + requests_to_send - ); - - for _ in 0..requests_to_send { - if let Some(request) = self.pending_filter_requests.pop_front() { - self.send_filter_request(network, request).await?; - } - } - } - - Ok(()) - } -}
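// Editorial sketch (not part of the diff): the BIP158 check performed by
// verify_cfilter_against_headers above, in isolation. Uses the same dashcore
// types as the deleted code; prev/expected headers come from storage in practice.
use dashcore::bip158::BlockFilter;
use dashcore::hash_types::FilterHeader;

fn cfilter_matches_header(
    filter_bytes: &[u8],
    prev_header: &FilterHeader,
    expected_header: &FilterHeader,
) -> bool {
    // filter_header(N) = double_sha256(filter_hash(N) || filter_header(N-1)),
    // which BlockFilter::filter_header computes from the raw filter bytes.
    let computed = BlockFilter::new(filter_bytes).filter_header(prev_header);
    computed == *expected_header
}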
diff --git a/dash-spv/src/sync/legacy/filters/headers.rs b/dash-spv/src/sync/legacy/filters/headers.rs deleted file mode 100644 index e1b53da1f..000000000 --- a/dash-spv/src/sync/legacy/filters/headers.rs +++ /dev/null @@ -1,965 +0,0 @@ -//! CFHeaders (filter header) synchronization logic. -//! -//! This module handles the synchronization of compact block filter headers (CFHeaders) -//! which are used to efficiently determine which blocks might contain transactions -//! relevant to watched addresses. -//! -//! ## Key Features -//! -//! - Sequential and flow-controlled CFHeaders synchronization -//! - Batch processing with configurable concurrency -//! - Timeout detection and automatic recovery -//! - Gap detection and overlap handling -//! - Filter header chain verification -//! - Stability checking before declaring sync complete - -use dashcore::hash_types::FilterHeader; -use dashcore::{ - network::message::NetworkMessage, - network::message_filter::{CFHeaders, GetCFHeaders}, - BlockHash, -}; -use dashcore_hashes::{sha256d, Hash}; - -use super::types::*; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; - -impl<S: StorageManager, N: NetworkManager> super::manager::FilterSyncManager<S, N> { - pub(super) async fn find_available_header_at_or_before( - &self, - abs_height: u32, - min_abs_height: u32, - storage: &S, - ) -> Option<(BlockHash, u32)> { - if abs_height < min_abs_height { - return None; - } - - let mut scan_height = abs_height; - loop { - match storage.get_header(scan_height).await { - Ok(Some(header)) => { - tracing::info!("Found available header at blockchain height {}", scan_height); - return Some((header.block_hash(), scan_height)); - } - Ok(None) => { - tracing::debug!( - "Header missing at blockchain height {}, scanning back", - scan_height - ); - } - Err(e) => { - tracing::warn!( - "Error reading header at blockchain height {}: {}", - scan_height, - e - ); - } - } - - if scan_height == min_abs_height { - break; - } - scan_height = scan_height.saturating_sub(1); - } - - None - } - /// Calculate the start height of a CFHeaders batch. - fn calculate_batch_start_height(cf_headers: &CFHeaders, stop_height: u32) -> u32 { - let count = cf_headers.filter_hashes.len() as u32; - let offset = count.saturating_sub(1); - stop_height.saturating_sub(offset) - } - - /// Get the height range for a CFHeaders batch. - pub(super) async fn get_batch_height_range( - &self, - cf_headers: &CFHeaders, - storage: &S, - ) -> SyncResult<(u32, u32, u32)> { - let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; - - let stop_height = self - .find_height_for_block_hash(&cf_headers.stop_hash, storage, 0, header_tip_height) - .await? - .ok_or_else(|| { - SyncError::Validation(format!( - "Cannot find height for stop hash {} in CFHeaders", - cf_headers.stop_hash - )) - })?; - - let start_height = Self::calculate_batch_start_height(cf_headers, stop_height); - - // Best-effort: resolve the start block hash for additional diagnostics from headers storage - let start_hash_opt = - storage.get_header(start_height).await.ok().flatten().map(|h| h.block_hash()); - - // Always try to resolve the expected/requested start as well (current_sync_height) - // We don't have access to current_sync_height here, so we'll log both the batch - // start and a best-effort expected start in the caller. For this analysis log, - // avoid placeholder labels and prefer concrete values when known. - let prev_height = start_height.saturating_sub(1); - match start_hash_opt { - Some(h) => { - tracing::debug!( - "CFHeaders batch analysis: batch_start_hash={}, msg_prev_filter_header={}, msg_prev_height={}, stop_hash={}, stop_height={}, start_height={}, count={}, header_tip_height={}", - h, - cf_headers.previous_filter_header, - prev_height, - cf_headers.stop_hash, - stop_height, - start_height, - cf_headers.filter_hashes.len(), - header_tip_height - ); - } - None => { - tracing::debug!( - "CFHeaders batch analysis: batch_start_hash=<unknown>, msg_prev_filter_header={}, msg_prev_height={}, stop_hash={}, stop_height={}, start_height={}, count={}, header_tip_height={}", - cf_headers.previous_filter_header, - prev_height, - cf_headers.stop_hash, - stop_height, - start_height, - cf_headers.filter_hashes.len(), - header_tip_height - ); - } - } - Ok((start_height, stop_height, header_tip_height)) - } - - pub async fn handle_cfheaders_message( - &mut self, - cf_headers: CFHeaders, - storage: &mut S, - network: &mut N, - ) -> SyncResult<bool> { - if !self.syncing_filter_headers { - // Not currently syncing, ignore - return Ok(true); - } - self.handle_filter_headers(cf_headers, storage, network).await - } - - pub async fn start_sync_headers( - &mut self, - network: &mut N, - storage: &mut S, - ) -> SyncResult<bool> { - if self.syncing_filter_headers { - return Err(SyncError::SyncInProgress); - } - - // Check if any connected peer supports compact filters - if !network - .has_peer_with_service(dashcore::network::constants::ServiceFlags::COMPACT_FILTERS) - .await - { - tracing::warn!( - "⚠️ No connected peers support compact filters (BIP 157/158). Skipping filter synchronization." - ); - tracing::warn!( - "⚠️ To enable filter sync, connect to peers that advertise the NODE_COMPACT_FILTERS service bit." - ); - return Ok(false); // No sync started - } - - tracing::info!("🚀 Starting filter header synchronization"); - tracing::debug!("FilterSync start: sync_base_height={}", self.sync_base_height); - - // Get current filter tip - let current_filter_height = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter tip height: {}", e)))? - .unwrap_or(0); - - // Get header tip (absolute blockchain height) - let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; - tracing::debug!( - "FilterSync context: header_tip_height={} (base={})", - header_tip_height, - self.sync_base_height - ); - - if current_filter_height >= header_tip_height { - tracing::info!("Filter headers already synced to header tip"); - return Ok(false); // Already synced - } - - // Determine next height to request - // In checkpoint sync, request from the checkpoint height itself. CFHeaders includes - // previous_filter_header for (start_height - 1), so we can compute the chain from the - // checkpoint and store its filter header as the first element. - let next_height = - if self.sync_base_height > 0 && current_filter_height < self.sync_base_height { - tracing::info!( - "Starting filter sync from checkpoint base {} (current filter height: {})", - self.sync_base_height, - current_filter_height - ); - self.sync_base_height - } else { - current_filter_height + 1 - }; - tracing::debug!( - "FilterSync plan: next_height={}, current_filter_height={}, header_tip_height={}", - next_height, - current_filter_height, - header_tip_height - ); - - if next_height > header_tip_height { - tracing::warn!( - "Filter sync requested but next height {} > header tip {}, nothing to sync", - next_height, - header_tip_height - ); - return Ok(false); - } - - // Set up sync state - self.syncing_filter_headers = true; - self.current_sync_height = next_height; - self.last_sync_progress = std::time::Instant::now(); - - // Get the stop hash (tip of headers) - let stop_hash = storage - .get_header(header_tip_height) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get stop header at blockchain height {}: {}", - header_tip_height, e - )) - })? - .ok_or_else(|| { - SyncError::Storage(format!( - "Stop header not found at blockchain height {}", - header_tip_height - )) - })? - .block_hash(); - - // Initial request for first batch - let batch_end_height = - (self.current_sync_height + FILTER_BATCH_SIZE - 1).min(header_tip_height); - - tracing::debug!( - "Requesting filter headers batch: start={}, end={}, count={} (base={})", - self.current_sync_height, - batch_end_height, - batch_end_height - self.current_sync_height + 1, - self.sync_base_height - ); - - // Get the hash at batch_end_height for the stop_hash - let batch_stop_hash = if batch_end_height < header_tip_height { - // Try to get the header at the calculated height with fallback - match storage.get_header(batch_end_height).await { - Ok(Some(header)) => { - tracing::debug!( - "Found header for batch stop at blockchain height {}, hash={}", - batch_end_height, - header.block_hash() - ); - header.block_hash() - } - Ok(None) => { - tracing::warn!( - "Initial batch header not found at blockchain height {}, scanning for available header", - batch_end_height - ); - - match self - .find_available_header_at_or_before( - batch_end_height, - self.current_sync_height, - storage, - ) - .await - { - Some((hash, _height)) => hash, - None => { - // If we can't find any headers in the batch range, something is wrong - // Don't fall back to tip as that would create an oversized request - let start_idx = - self.header_abs_to_storage_index(self.current_sync_height); - let end_idx = self.header_abs_to_storage_index(batch_end_height); - return Err(SyncError::Storage(format!( - "No headers found in batch range {} to {} (header storage idx {:?} to {:?})", - self.current_sync_height, - batch_end_height, - start_idx, - end_idx - ))); - } - } - } - Err(e) => { - return Err(SyncError::Validation(format!( - "Failed to get initial batch stop header at height {}: {}", - batch_end_height, e - ))); - } - } - } else { - stop_hash - }; - - self.request_filter_headers(network, self.current_sync_height, batch_stop_hash).await?; - - Ok(true) // Sync started - } - - pub async fn request_filter_headers( - &mut self, - network: &mut N, - start_height: u32, - stop_hash: BlockHash, - ) -> SyncResult<()> { - // Validation: ensure this is a valid request - // Note: We can't easily get the stop height here without storage access, - // but we can at least check obvious invalid cases - if start_height == 0 { - tracing::error!("Invalid filter header request: start_height cannot be 0"); - return Err(SyncError::Validation( - "Invalid start_height 0 for filter headers".to_string(), - )); - } - - tracing::debug!( - "Sending GetCFHeaders: start_height={}, stop_hash={}, base_height={} (header storage idx {:?}, filter storage idx {:?})", - start_height, - stop_hash, - self.sync_base_height, - self.header_abs_to_storage_index(start_height), - self.filter_abs_to_storage_index(start_height) - ); - - let get_cf_headers = GetCFHeaders { - filter_type: 0, // Basic filter type - start_height, - stop_hash, - }; - - network - .send_message(NetworkMessage::GetCFHeaders(get_cf_headers)) - .await - .map_err(|e| SyncError::Network(format!("Failed to send GetCFHeaders: {}", e)))?; - - tracing::debug!("Requested filter headers from height {} to {}", start_height, stop_hash); - - Ok(()) - } - - /// Start synchronizing filter headers. - pub async fn start_sync_filter_headers( - &mut self, - network: &mut N, - storage: &mut S, - ) -> SyncResult<bool> { - if self.syncing_filter_headers { - return Err(SyncError::SyncInProgress); - } - - // Check if any connected peer supports compact filters - if !network - .has_peer_with_service(dashcore::network::constants::ServiceFlags::COMPACT_FILTERS) - .await - { - tracing::warn!( - "⚠️ No connected peers support compact filters (BIP 157/158). Skipping filter synchronization." - ); - return Ok(false); // No sync started - } - - tracing::info!("🚀 Starting filter header synchronization"); - - // Get current filter tip - let current_filter_height = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter tip height: {}", e)))? - .unwrap_or(0); - - // Get header tip (absolute blockchain height) - let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; - - if current_filter_height >= header_tip_height { - tracing::info!("Filter headers already synced to header tip"); - return Ok(false); // Already synced - } - - // Determine next height to request - let next_height = - if self.sync_base_height > 0 && current_filter_height < self.sync_base_height { - tracing::info!( - "Starting filter sync from checkpoint base {} (current filter height: {})", - self.sync_base_height, - current_filter_height - ); - self.sync_base_height - } else { - current_filter_height + 1 - }; - - if next_height > header_tip_height { - tracing::warn!( - "Filter sync requested but next height {} > header tip {}, nothing to sync", - next_height, - header_tip_height - ); - return Ok(false); - } - - // Set up flow control state - self.syncing_filter_headers = true; - self.current_sync_height = next_height; - self.next_cfheader_height_to_process = next_height; - self.last_sync_progress = std::time::Instant::now(); - - // Build request queue - self.build_cfheader_request_queue(storage, next_height, header_tip_height).await?; - - // Send initial batch of requests - self.process_cfheader_request_queue(network).await?; - - tracing::info!( - "✅ CFHeaders sync initiated ({} requests queued, {} active)", - self.pending_cfheader_requests.len(), - self.active_cfheader_requests.len() - ); - - Ok(true) - } - - /// Build queue of CFHeaders requests from the specified range. - async fn build_cfheader_request_queue( - &mut self, - storage: &S, - start_height: u32, - end_height: u32, - ) -> SyncResult<()> { - // Clear any existing queue - self.pending_cfheader_requests.clear(); - self.active_cfheader_requests.clear(); - self.cfheader_retry_counts.clear(); - self.received_cfheader_batches.clear(); - - tracing::info!( - "🔄 Building CFHeaders request queue from height {} to {} ({} blocks)", - start_height, - end_height, - end_height - start_height + 1 - ); - - // Build requests in batches of FILTER_BATCH_SIZE (1999) - let mut current_height = start_height; - - while current_height <= end_height { - let batch_end = (current_height + FILTER_BATCH_SIZE - 1).min(end_height); - - // Get stop_hash for this batch - let stop_hash = storage - .get_header(batch_end) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get stop header at height {}: {}", - batch_end, e - )) - })? - .ok_or_else(|| { - SyncError::Storage(format!("Stop header not found at height {}", batch_end)) - })? - .block_hash(); - - // Create CFHeaders request and add to queue - let request = CFHeaderRequest { - start_height: current_height, - stop_hash, - is_retry: false, - }; - - self.pending_cfheader_requests.push_back(request); - - tracing::debug!( - "Queued CFHeaders request for heights {} to {} (stop_hash: {})", - current_height, - batch_end, - stop_hash - ); - - current_height = batch_end + 1; - } - - tracing::info!( - "📋 CFHeaders request queue built with {} batches", - self.pending_cfheader_requests.len() - ); - - Ok(()) - } - - /// Process the CFHeaders request queue. - async fn process_cfheader_request_queue(&mut self, network: &mut N) -> SyncResult<()> { - // Send initial batch up to max_concurrent_cfheader_requests - let initial_send_count = - self.max_concurrent_cfheader_requests.min(self.pending_cfheader_requests.len()); - - for _ in 0..initial_send_count { - if let Some(request) = self.pending_cfheader_requests.pop_front() { - self.send_cfheader_request(network, request).await?; - } - } - - tracing::info!( - "🚀 Sent initial batch of {} CFHeaders requests ({} queued, {} active)", - initial_send_count, - self.pending_cfheader_requests.len(), - self.active_cfheader_requests.len() - ); - - Ok(()) - } - - /// Send a single CFHeaders request and track it as active. - async fn send_cfheader_request( - &mut self, - network: &mut N, - request: CFHeaderRequest, - ) -> SyncResult<()> { - // Send the actual network request - self.request_filter_headers(network, request.start_height, request.stop_hash).await?; - - // Track this request as active - let active_request = ActiveCFHeaderRequest { - sent_time: std::time::Instant::now(), - stop_hash: request.stop_hash, - }; - - self.active_cfheader_requests.insert(request.start_height, active_request); - - tracing::debug!( - "📡 Sent CFHeaders request for height {} (stop_hash: {}, now {} active)", - request.start_height, - request.stop_hash, - self.active_cfheader_requests.len() - ); - - Ok(()) - } - - /// Handle CFHeaders message (buffering and sequential processing). - async fn handle_filter_headers( - &mut self, - cf_headers: CFHeaders, - storage: &mut S, - network: &mut N, - ) -> SyncResult<bool> { - // Handle empty response - indicates end of sync - if cf_headers.filter_hashes.is_empty() { - tracing::info!("Received empty CFHeaders response - sync complete"); - self.syncing_filter_headers = false; - self.clear_filter_header_sync_state(); - return Ok(false); - } - - // Get the height range for this batch - let (batch_start_height, stop_height, _header_tip_height) = - self.get_batch_height_range(&cf_headers, storage).await?; - - tracing::debug!( - "Received CFHeaders batch: start={}, stop={}, count={}, next_expected={}", - batch_start_height, - stop_height, - cf_headers.filter_hashes.len(), - self.next_cfheader_height_to_process - ); - - // Mark this request as complete in active tracking - self.active_cfheader_requests.remove(&batch_start_height); - - // Check if this is the next expected batch - if batch_start_height == self.next_cfheader_height_to_process { - // Process this batch immediately - tracing::debug!("Processing expected batch at height {}", batch_start_height); - self.process_cfheader_batch(cf_headers, storage, network).await?; - - // Try to process any buffered batches that are now in sequence - self.process_buffered_cfheader_batches(storage, network).await?; - } else if batch_start_height > self.next_cfheader_height_to_process { - // Out of order - buffer for later - tracing::debug!( - "Buffering out-of-order batch at height {} (expected {})", - batch_start_height, - self.next_cfheader_height_to_process - ); - - let batch = ReceivedCFHeaderBatch { - cfheaders: cf_headers, - received_at: std::time::Instant::now(), - }; - - self.received_cfheader_batches.insert(batch_start_height, batch); - } else { - // Already processed - likely a duplicate or retry - tracing::debug!( - "Ignoring already-processed batch at height {} (current expected: {})", - batch_start_height, - self.next_cfheader_height_to_process - ); - } - - // Send next queued requests to fill available slots - self.process_next_queued_cfheader_requests(network).await?; - - // Check if sync is complete - if self.is_cfheader_sync_complete(storage).await? { - tracing::info!("✅ CFHeaders sync complete!"); - self.syncing_filter_headers = false; - self.clear_filter_header_sync_state(); - return Ok(false); - } - - Ok(true) - } - - /// Process a single CFHeaders batch (extracted from original handle_cfheaders logic). - async fn process_cfheader_batch( - &mut self, - cf_headers: CFHeaders, - storage: &mut S, - _network: &mut N, - ) -> SyncResult<()> { - let (batch_start_height, stop_height, _header_tip_height) = - self.get_batch_height_range(&cf_headers, storage).await?; - - // Verify and process the batch - match self.verify_filter_header_chain(&cf_headers, batch_start_height, storage).await { - Ok(true) => { - tracing::debug!( - "✅ Filter header chain verification successful for batch {}-{}", - batch_start_height, - stop_height - ); - - // Store the verified filter headers - self.store_filter_headers(&cf_headers, storage).await?; - - // Update next expected height - self.next_cfheader_height_to_process = stop_height + 1; - self.current_sync_height = stop_height + 1; - self.last_sync_progress = std::time::Instant::now(); - - tracing::debug!( - "Updated next expected height to {}, batch processed successfully", - self.next_cfheader_height_to_process - ); - } - Ok(false) => { - tracing::warn!( - "⚠️ Filter header chain verification failed for batch {}-{}", - batch_start_height, - stop_height - ); - return Err(SyncError::Validation( - "Filter header chain verification failed".to_string(), - )); - } - Err(e) => { - tracing::error!("❌ Filter header chain verification failed: {}", e); - return Err(e); - } - } - - Ok(()) - } - - /// Process buffered CFHeaders batches that are now in sequence. - async fn process_buffered_cfheader_batches( - &mut self, - storage: &mut S, - network: &mut N, - ) -> SyncResult<()> { - while let Some(batch) = - self.received_cfheader_batches.remove(&self.next_cfheader_height_to_process) - { - tracing::debug!( - "Processing buffered batch at height {}", - self.next_cfheader_height_to_process - ); - - self.process_cfheader_batch(batch.cfheaders, storage, network).await?; - } - - Ok(()) - } - - /// Process next requests from the queue when active requests complete. - pub(super) async fn process_next_queued_cfheader_requests( - &mut self, - network: &mut N, - ) -> SyncResult<()> { - let available_slots = self - .max_concurrent_cfheader_requests - .saturating_sub(self.active_cfheader_requests.len()); - - let mut sent_count = 0; - for _ in 0..available_slots { - if let Some(request) = self.pending_cfheader_requests.pop_front() { - self.send_cfheader_request(network, request).await?; - sent_count += 1; - } else { - break; - } - } - - if sent_count > 0 { - tracing::debug!( - "🚀 Sent {} additional CFHeaders requests from queue ({} queued, {} active)", - sent_count, - self.pending_cfheader_requests.len(), - self.active_cfheader_requests.len() - ); - } - - Ok(()) - } - - /// Check if CFHeaders sync is complete. - async fn is_cfheader_sync_complete(&self, storage: &S) -> SyncResult<bool> { - // Sync is complete if: - // 1. No pending requests - // 2. No active requests - // 3. No buffered batches - // 4. Current height >= header tip - - if !self.pending_cfheader_requests.is_empty() { - return Ok(false); - } - - if !self.active_cfheader_requests.is_empty() { - return Ok(false); - } - - if !self.received_cfheader_batches.is_empty() { - return Ok(false); - } - - let header_tip = storage.get_tip_height().await.unwrap_or(0); - - Ok(self.next_cfheader_height_to_process > header_tip) - } - - /// Clear sync state. - fn clear_filter_header_sync_state(&mut self) { - self.pending_cfheader_requests.clear(); - self.active_cfheader_requests.clear(); - self.cfheader_retry_counts.clear(); - self.received_cfheader_batches.clear(); - } - - pub(super) async fn handle_overlapping_headers( - &self, - cf_headers: &CFHeaders, - expected_start_height: u32, - storage: &mut S, - ) -> SyncResult<(usize, u32)> { - // Get the original height range for this CFHeaders batch - let (original_start_height, _stop_height, _header_tip_height) = - self.get_batch_height_range(cf_headers, storage).await?; - - // Determine how many headers overlap with what we already have - let headers_to_skip = expected_start_height.saturating_sub(original_start_height) as usize; - - // Complete overlap case - all headers already processed - if headers_to_skip >= cf_headers.filter_hashes.len() { - tracing::info!( - "✅ All {} headers in batch already processed, skipping", - cf_headers.filter_hashes.len() - ); - return Ok((0, expected_start_height)); - } - // Compute filter headers for the entire batch WITHOUT verifying against local chain yet. - // This lets us compare continuity precisely at the overlap boundary rather than the - // batch's original start (which may precede our local tip). - let mut computed_headers: Vec<FilterHeader> = - Vec::with_capacity(cf_headers.filter_hashes.len()); - let mut prev_header = cf_headers.previous_filter_header; - for filter_hash in &cf_headers.filter_hashes { - let mut data = [0u8; 64]; - data[..32].copy_from_slice(filter_hash.as_byte_array()); - data[32..].copy_from_slice(prev_header.as_byte_array()); - let header = FilterHeader::from_byte_array(sha256d::Hash::hash(&data).to_byte_array()); - computed_headers.push(header); - prev_header = header; - } - - // Verify continuity exactly at the expected overlap boundary: expected_start_height - 1 - let expected_prev_height = match expected_start_height.checked_sub(1) { - Some(h) => h, - None => { - // Should not happen since expected_start_height > 0 in overlap handling - return Ok((0, expected_start_height)); - } - }; - - // Determine the computed header at expected_prev_height using the batch data - let steps_to_expected_prev = expected_start_height.saturating_sub(original_start_height); - let computed_prev_at_expected = if steps_to_expected_prev == 0 { - cf_headers.previous_filter_header - } else { - // steps_to_expected_prev >= 1 implies index exists - computed_headers[(steps_to_expected_prev - 1) as usize] - }; - - // Load our local header at expected_prev_height - let local_prev_at_expected = match storage.get_filter_header(expected_prev_height).await { - Ok(Some(h)) => h, - Ok(None) => { - tracing::warn!( - "Missing local filter header at height {} while handling overlap; skipping batch", - expected_prev_height - ); - return Ok((0, expected_start_height)); - } - Err(e) => { - return Err(SyncError::Storage(format!( - "Failed to read local filter header at height {}: {}", - expected_prev_height, e - ))); - } - }; - - // If continuity at the overlap boundary doesn't match, ignore this overlapping batch - if computed_prev_at_expected != local_prev_at_expected { - tracing::warn!( - "Overlapping CFHeaders batch does not connect at height {}: computed={:?}, local={:?}. Ignoring batch.", - expected_prev_height, - computed_prev_at_expected, - local_prev_at_expected - ); - return Ok((0, expected_start_height)); - } - - // Store only the non-overlapping suffix starting at expected_start_height - let start_index = steps_to_expected_prev as usize; - let new_filter_headers = if start_index < computed_headers.len() { - computed_headers[start_index..].to_vec() - } else { - Vec::new() - }; - - if !new_filter_headers.is_empty() { - storage.store_filter_headers(&new_filter_headers).await.map_err(|e| { - SyncError::Storage(format!("Failed to store filter headers: {}", e)) - })?; - - tracing::info!( - "✅ Stored {} new filter headers (skipped {} overlapping)", - new_filter_headers.len(), - headers_to_skip - ); - - let new_current_height = expected_start_height + new_filter_headers.len() as u32; - Ok((new_filter_headers.len(), new_current_height)) - } else { - Ok((0, expected_start_height)) - } - } - - /// Verify filter header chain connects to our local chain. - /// This is a simplified version focused only on cryptographic chain verification, - /// with overlap detection handled by the dedicated overlap resolution system. - pub(super) async fn verify_filter_header_chain( - &self, - cf_headers: &CFHeaders, - start_height: u32, - storage: &S, - ) -> SyncResult<bool> { - if cf_headers.filter_hashes.is_empty() { - return Ok(true); - } - - // Skip verification for the first batch when starting from genesis or around checkpoint - // - Genesis sync: start_height == 1 (we don't have genesis filter header) - // - Checkpoint sync (expected first batch): start_height == sync_base_height + 1 - // - Checkpoint overlap batch: start_height == sync_base_height (peer included one extra) - if start_height <= 1 - || (self.sync_base_height > 0 - && (start_height == self.sync_base_height - || start_height == self.sync_base_height + 1)) - { - tracing::debug!( - "Skipping filter header chain verification for first batch (start_height={}, sync_base_height={})", - start_height, - self.sync_base_height - ); - return Ok(true); - } - - // Safety check to prevent underflow - if start_height == 0 { - tracing::error!( - "Invalid start_height=0 in filter header verification - this should never happen" - ); - return Err(SyncError::Validation( - "Invalid start_height=0 in filter header verification".to_string(), - )); - } - - // Get the expected previous filter header from our local chain - let prev_height = start_height - 1; - tracing::debug!( - "Verifying filter header chain: start_height={}, prev_height={}", - start_height, - prev_height - ); - - let expected_prev_header = storage - .get_filter_header(prev_height) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get previous filter header at height {}: {}", - prev_height, e - )) - })? - .ok_or_else(|| { - SyncError::Storage(format!( - "Missing previous filter header at height {}", - prev_height - )) - })?; - - // Simple chain continuity check - the received headers should connect to our expected previous header - if cf_headers.previous_filter_header != expected_prev_header { - tracing::error!( - "Filter header chain verification failed: received previous_filter_header {:?} doesn't match expected header {:?} at height {}", - cf_headers.previous_filter_header, - expected_prev_header, - prev_height - ); - return Ok(false); - } - - tracing::trace!( - "Filter header chain verification passed for {} headers", - cf_headers.filter_hashes.len() - ); - Ok(true) - } -}
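// Editorial sketch (not part of the diff): the sequential-processing pattern that
// handle_filter_headers above implements, reduced to its core. Batches arrive out of
// order; only the next expected start height is processed, later starts are parked.
// Names and the fixed-batch-length assumption are illustrative, not from the source.
use std::collections::HashMap;

struct Sequencer<B> {
    next_expected: u32,
    parked: HashMap<u32, B>,
}

impl<B> Sequencer<B> {
    /// Accept a batch starting at `start` covering `len` heights; return every batch
    /// that is now processable, in order.
    fn accept(&mut self, start: u32, len: u32, batch: B) -> Vec<B> {
        let mut ready = Vec::new();
        if start == self.next_expected {
            self.next_expected = start + len;
            ready.push(batch);
            // Drain parked batches that are now contiguous (assumes equal-size batches;
            // the real code recomputes each batch's range from its stop hash).
            while let Some(b) = self.parked.remove(&self.next_expected) {
                self.next_expected += len;
                ready.push(b);
            }
        } else if start > self.next_expected {
            self.parked.insert(start, batch);
        } // start < next_expected: duplicate or retry, drop it
        ready
    }
}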
diff --git a/dash-spv/src/sync/legacy/filters/manager.rs b/dash-spv/src/sync/legacy/filters/manager.rs deleted file mode 100644 index 1ae55501f..000000000 --- a/dash-spv/src/sync/legacy/filters/manager.rs +++ /dev/null @@ -1,298 +0,0 @@ -//! Filter synchronization manager - main coordinator. -//! -//! This module contains the FilterSyncManager struct and high-level coordination logic -//! that delegates to specialized sub-modules for headers, downloads, matching, etc. - -use crate::client::ClientConfig; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use crate::types::SharedFilterHeights; -use dashcore::{hash_types::FilterHeader, network::message_filter::CFHeaders, BlockHash}; -use dashcore_hashes::{sha256d, Hash}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::time::Duration; - -// Import types and constants from the types module -use super::types::*; - -/// Manages BIP157 compact block filter synchronization. -/// -/// # Generic Parameters -/// -/// - `S: StorageManager` - Storage backend for filter headers and filters -/// - `N: NetworkManager` - Network for requesting filters from peers -/// -/// ## Why Generics? -/// -/// Filter synchronization involves: -/// - Downloading thousands of filter headers and filters -/// - Complex flow control with parallel requests -/// - Retry logic -/// - Storage operations for persistence -/// -/// Generic design enables: -/// - **Testing** without real network or disk I/O -/// - **Performance** through monomorphization (no vtable overhead) -/// - **Flexibility** for custom storage backends -/// -/// Production uses concrete types; tests use mocks. Both compile to efficient, -/// specialized code without runtime abstraction costs. -pub struct FilterSyncManager<S: StorageManager, N: NetworkManager> { - pub(super) _phantom_s: std::marker::PhantomData<S>, - pub(super) _phantom_n: std::marker::PhantomData<N>, - pub(super) _config: ClientConfig, - /// Whether filter header sync is currently in progress - pub(super) syncing_filter_headers: bool, - /// Current height being synced for filter headers - pub(super) current_sync_height: u32, - /// Base height for sync (typically from checkpoint) - pub(super) sync_base_height: u32, - /// Last time sync progress was made (for timeout detection) - pub(super) last_sync_progress: std::time::Instant, - /// Whether filter sync is currently in progress - pub(super) syncing_filters: bool, - /// Queue of blocks that have been requested and are waiting for response - pub(super) pending_block_downloads: VecDeque<crate::types::FilterMatch>, - /// Blocks currently being downloaded (map for quick lookup) - pub(super) downloading_blocks: HashMap<BlockHash, u32>, - /// Blocks requested by the filter processing thread - pub(super) processing_thread_requests: std::sync::Arc<tokio::sync::Mutex<HashSet<BlockHash>>>, - /// Track individual filter heights that have been received (shared with stats) - pub(super) received_filter_heights: SharedFilterHeights, - /// Maximum retries for a filter range - pub(super) max_filter_retries: u32, - /// Retry attempts per range - pub(super) filter_retry_counts: HashMap<(u32, u32), u32>, - /// Queue of pending filter requests - pub(super) pending_filter_requests: VecDeque<FilterRequest>, - /// Currently active filter requests (limited by MAX_CONCURRENT_FILTER_REQUESTS) - pub(super) active_filter_requests: HashMap<(u32, u32), ActiveRequest>, - /// Queue of pending CFHeaders requests - pub(super) pending_cfheader_requests: VecDeque<CFHeaderRequest>, - /// Currently active CFHeaders requests: start_height -> ActiveCFHeaderRequest - pub(super) active_cfheader_requests: HashMap<u32, ActiveCFHeaderRequest>, - /// Retry counts per CFHeaders range: start_height -> retry_count - pub(super) cfheader_retry_counts: HashMap<u32, u32>, - /// Maximum retries for CFHeaders - pub(super) max_cfheader_retries: u32, - /// Received CFHeaders batches waiting for sequential processing: start_height -> batch - pub(super) received_cfheader_batches: HashMap<u32, ReceivedCFHeaderBatch>, - /// Next expected height for sequential processing - pub(super) next_cfheader_height_to_process: u32, - /// Maximum concurrent CFHeaders requests - pub(super) max_concurrent_cfheader_requests: usize, - /// Timeout for CFHeaders requests - pub(super) cfheader_request_timeout: std::time::Duration, -} - -impl<S: StorageManager, N: NetworkManager> FilterSyncManager<S, N> { - /// Create a new filter sync manager from the client configuration. - pub fn new(config: &ClientConfig, received_filter_heights: SharedFilterHeights) -> Self { - Self { - _config: config.clone(), - syncing_filter_headers: false, - current_sync_height: 0, - sync_base_height: 0, - last_sync_progress: std::time::Instant::now(), - syncing_filters: false, - pending_block_downloads: VecDeque::new(), - downloading_blocks: HashMap::new(), - processing_thread_requests: std::sync::Arc::new(tokio::sync::Mutex::new( - std::collections::HashSet::new(), - )), - received_filter_heights, - max_filter_retries: 3, - filter_retry_counts: HashMap::new(), - pending_filter_requests: VecDeque::new(), - active_filter_requests: HashMap::new(), - // CFHeaders fields - pending_cfheader_requests: VecDeque::new(), - active_cfheader_requests: HashMap::new(), - cfheader_retry_counts: HashMap::new(), - max_cfheader_retries: 3, - received_cfheader_batches: HashMap::new(), - next_cfheader_height_to_process: 0, - max_concurrent_cfheader_requests: 50, - _phantom_s: std::marker::PhantomData, - _phantom_n: std::marker::PhantomData, - cfheader_request_timeout: Duration::from_secs(30), - } - } - - /// Set the base height for sync (typically from checkpoint) - pub fn set_sync_base_height(&mut self, height: u32) { - self.sync_base_height = height; - } - - /// Convert absolute blockchain height to block header storage index. - /// Storage indexing is base-inclusive: at checkpoint base B, storage index 0 == absolute height B. - pub(super) fn header_abs_to_storage_index(&self, height: u32) -> Option<u32> { - if self.sync_base_height > 0 { - height.checked_sub(self.sync_base_height) - } else { - Some(height) - } - } - - /// Convert absolute blockchain height to filter header storage index. - /// Storage indexing is base-inclusive for filter headers as well. - pub(super) fn filter_abs_to_storage_index(&self, height: u32) -> Option<u32> { - if self.sync_base_height > 0 { - height.checked_sub(self.sync_base_height) - } else { - Some(height) - } - } - - // Note: previously had filter_storage_to_abs_height, but it was unused and removed for clarity. - - /// Set syncing filters state. - pub fn set_syncing_filters(&mut self, syncing: bool) { - self.syncing_filters = syncing; - } - - /// Check if filter sync is available (any peer supports compact filters). - pub async fn is_filter_sync_available(&self, network: &N) -> bool { - network - .has_peer_with_service(dashcore::network::constants::ServiceFlags::COMPACT_FILTERS) - .await - } - - /// Process the filter hashes from a CFHeaders message into filter headers, - /// verifying chain continuity first. - pub async fn process_filter_headers( - &self, - cf_headers: &CFHeaders, - start_height: u32, - storage: &S, - ) -> SyncResult<Vec<FilterHeader>> { - if cf_headers.filter_hashes.is_empty() { - return Ok(Vec::new()); - } - - tracing::debug!( - "Processing {} filter headers starting from height {}", - cf_headers.filter_hashes.len(), - start_height - ); - - // Verify filter header chain - if !self.verify_filter_header_chain(cf_headers, start_height, storage).await? { - return Err(SyncError::Validation( - "Filter header chain verification failed".to_string(), - )); - } - - // Convert filter hashes to filter headers - let mut new_filter_headers = Vec::with_capacity(cf_headers.filter_hashes.len()); - let mut prev_header = cf_headers.previous_filter_header; - - // For the first batch starting at height 1, we need to store the genesis filter header (height 0) - if start_height == 1 { - // The previous_filter_header is the genesis filter header at height 0 - // We need to store this so subsequent batches can verify against it - tracing::debug!("Storing genesis filter header: {:?}", prev_header); - // Note: We'll handle this in the calling function since we need mutable storage access - } - - for (i, filter_hash) in cf_headers.filter_hashes.iter().enumerate() { - // According to BIP157: filter_header = double_sha256(filter_hash || prev_filter_header) - let mut data = [0u8; 64]; - data[..32].copy_from_slice(filter_hash.as_byte_array()); - data[32..].copy_from_slice(prev_header.as_byte_array()); - - let filter_header = - FilterHeader::from_byte_array(sha256d::Hash::hash(&data).to_byte_array()); - - if i < 1 || i >= cf_headers.filter_hashes.len() - 1 { - tracing::trace!( - "Filter header {}: filter_hash={:?}, prev_header={:?}, result={:?}", - start_height + i as u32, - filter_hash, - prev_header, - filter_header - ); - } - - new_filter_headers.push(filter_header); - prev_header = filter_header; - } - - Ok(new_filter_headers) - } - - /// Check whether any block downloads are pending or in flight. - pub fn has_pending_downloads(&self) -> bool { - !self.pending_block_downloads.is_empty() || !self.downloading_blocks.is_empty() - } - - /// Get the number of pending block downloads. - pub fn pending_download_count(&self) -> usize { - self.pending_block_downloads.len() - } - - /// Get the number of active filter requests (for flow control). - pub fn active_request_count(&self) -> usize { - self.active_filter_requests.len() - } - - /// Check if there are pending filter requests in the queue. - pub fn has_pending_filter_requests(&self) -> bool { - !self.pending_filter_requests.is_empty() - } - - pub fn reset(&mut self) { - self.syncing_filter_headers = false; - self.syncing_filters = false; - self.pending_block_downloads.clear(); - self.downloading_blocks.clear(); - self.clear_filter_sync_state(); - } - - /// Clear filter sync state (for retries and recovery). - pub(super) fn clear_filter_sync_state(&mut self) { - // Clear request tracking - self.active_filter_requests.clear(); - self.pending_filter_requests.clear(); - - // Clear retry counts for fresh start - self.filter_retry_counts.clear(); - - // Note: We don't clear received_filter_heights as those are actually received - - tracing::debug!("Cleared filter sync state for retry/recovery"); - } - - /// Check if filter header sync is currently in progress. - pub fn is_syncing_filter_headers(&self) -> bool { - self.syncing_filter_headers - } - - /// Check if filter sync is currently in progress. - pub fn is_syncing_filters(&self) -> bool { - self.syncing_filters - || !self.active_filter_requests.is_empty() - || !self.pending_filter_requests.is_empty() - } - - pub fn reset_pending_requests(&mut self) { - // Clear all request tracking state - self.syncing_filter_headers = false; - self.syncing_filters = false; - self.pending_filter_requests.clear(); - self.active_filter_requests.clear(); - self.filter_retry_counts.clear(); - self.pending_block_downloads.clear(); - self.downloading_blocks.clear(); - self.last_sync_progress = std::time::Instant::now(); - tracing::debug!("Reset filter sync pending requests"); - } - - /// Fully clear filter tracking state, including received heights. - pub async fn clear_filter_state(&mut self) { - self.reset_pending_requests(); - let mut heights = self.received_filter_heights.lock().await; - heights.clear(); - tracing::info!("Cleared filter sync state and received heights"); - } -}
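// Editorial sketch (not part of the diff): the BIP157 link computation that
// process_filter_headers above folds over each batch, shown for a single link.
// Uses the same dashcore/dashcore_hashes types as the deleted module.
use dashcore::hash_types::{FilterHash, FilterHeader};
use dashcore_hashes::{sha256d, Hash};

fn next_filter_header(filter_hash: &FilterHash, prev_header: &FilterHeader) -> FilterHeader {
    // BIP157: filter_header(N) = double_sha256(filter_hash(N) || filter_header(N-1))
    let mut data = [0u8; 64];
    data[..32].copy_from_slice(filter_hash.as_byte_array());
    data[32..].copy_from_slice(prev_header.as_byte_array());
    FilterHeader::from_byte_array(sha256d::Hash::hash(&data).to_byte_array())
}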
- #[allow(dead_code)] - fn filter_matches_scripts( - &self, - filter_data: &[u8], - block_hash: &BlockHash, - scripts: &[ScriptBuf], - ) -> SyncResult { - if scripts.is_empty() { - return Ok(false); - } - - if filter_data.is_empty() { - tracing::debug!("Empty filter data, no matches possible"); - return Ok(false); - } - - // Create a BlockFilterReader with the block hash for proper key derivation - let filter_reader = BlockFilterReader::new(block_hash); - - // Convert scripts to byte slices for matching without heap allocation - let mut script_bytes = Vec::with_capacity(scripts.len()); - for script in scripts { - script_bytes.push(script.as_bytes()); - } - - // tracing::debug!("Checking filter against {} watch scripts using BIP158 GCS", scripts.len()); - - // Use the BIP158 filter to check if any scripts match - let mut filter_slice = filter_data; - match filter_reader.match_any(&mut filter_slice, script_bytes.into_iter()) { - Ok(matches) => { - if matches { - tracing::info!( - "BIP158 filter match found! Block {} contains watched scripts", - block_hash - ); - } else { - tracing::trace!("No BIP158 filter matches found for block {}", block_hash); - } - Ok(matches) - } - Err(Bip158Error::Io(e)) => { - Err(SyncError::Storage(format!("BIP158 filter IO error: {}", e))) - } - Err(Bip158Error::UtxoMissing(outpoint)) => { - Err(SyncError::Validation(format!("BIP158 filter UTXO missing: {}", outpoint))) - } - Err(_) => Err(SyncError::Validation("BIP158 filter error".to_string())), - } - } - - /// Store filter headers from a CFHeaders message. - /// This method is used when filter headers are received outside of the normal sync process, - pub async fn process_filter_matches_and_download( - &mut self, - filter_matches: Vec, - network: &mut N, - ) -> SyncResult> { - if filter_matches.is_empty() { - return Ok(filter_matches); - } - - tracing::info!("Processing {} filter matches for block downloads", filter_matches.len()); - - // Filter out blocks already being downloaded or queued - let mut new_downloads = Vec::new(); - let mut inventory_items = Vec::new(); - - for filter_match in filter_matches { - // Check if already downloading or queued - if self.downloading_blocks.contains_key(&filter_match.block_hash) { - tracing::debug!("Block {} already being downloaded", filter_match.block_hash); - continue; - } - - if self.pending_block_downloads.iter().any(|m| m.block_hash == filter_match.block_hash) - { - tracing::debug!("Block {} already queued for download", filter_match.block_hash); - continue; - } - - tracing::info!( - "📦 Queuing block download for {} at height {}", - filter_match.block_hash, - filter_match.height - ); - - // Add to inventory for bulk request - inventory_items.push(Inventory::Block(filter_match.block_hash)); - - // Mark as downloading and add to queue - self.downloading_blocks.insert(filter_match.block_hash, filter_match.height); - self.pending_block_downloads.push_back(filter_match.clone()); - new_downloads.push(filter_match); - } - - // Send single bundled GetData request for all blocks - if !inventory_items.is_empty() { - tracing::info!( - "📦 Requesting {} blocks in single GetData message", - inventory_items.len() - ); - - let getdata = NetworkMessage::GetData(inventory_items); - network.send_message(getdata).await.map_err(|e| { - SyncError::Network(format!("Failed to send bundled GetData for blocks: {}", e)) - })?; - - tracing::debug!( - "Added {} blocks to download queue (total queue size: {})", - new_downloads.len(), - self.pending_block_downloads.len() - ); - } - - 
Ok(new_downloads) - } - - pub async fn request_block_download( - &mut self, - filter_match: crate::types::FilterMatch, - network: &mut N, - ) -> SyncResult<()> { - // Check if already downloading or queued - if self.downloading_blocks.contains_key(&filter_match.block_hash) { - tracing::debug!("Block {} already being downloaded", filter_match.block_hash); - return Ok(()); - } - - if self.pending_block_downloads.iter().any(|m| m.block_hash == filter_match.block_hash) { - tracing::debug!("Block {} already queued for download", filter_match.block_hash); - return Ok(()); - } - - tracing::info!( - "📦 Requesting block download for {} at height {}", - filter_match.block_hash, - filter_match.height - ); - - // Create GetData message for the block - let inv = Inventory::Block(filter_match.block_hash); - - let getdata = vec![inv]; - - // Send the request - network - .send_message(NetworkMessage::GetData(getdata)) - .await - .map_err(|e| SyncError::Network(format!("Failed to send GetData for block: {}", e)))?; - - // Mark as downloading and add to queue - self.downloading_blocks.insert(filter_match.block_hash, filter_match.height); - let block_hash = filter_match.block_hash; - self.pending_block_downloads.push_back(filter_match); - - tracing::debug!( - "Added block {} to download queue (queue size: {})", - block_hash, - self.pending_block_downloads.len() - ); - - Ok(()) - } - - pub async fn handle_downloaded_block( - &mut self, - block: &dashcore::block::Block, - ) -> SyncResult<Option<crate::types::FilterMatch>> { - let block_hash = block.block_hash(); - - // Check if this block was requested by the sync manager - if let Some(height) = self.downloading_blocks.remove(&block_hash) { - tracing::info!("📦 Received expected block {} at height {}", block_hash, height); - - // Find and remove from pending queue - if let Some(pos) = - self.pending_block_downloads.iter().position(|m| m.block_hash == block_hash) - { - let mut filter_match = - self.pending_block_downloads.remove(pos).ok_or_else(|| { - SyncError::InvalidState("filter match should exist at position".to_string()) - })?; - filter_match.block_requested = true; - - tracing::debug!( - "Removed block {} from download queue (remaining: {})", - block_hash, - self.pending_block_downloads.len() - ); - - return Ok(Some(filter_match)); - } - } - - // Check if this block was requested by the filter processing thread - { - let mut processing_requests = self.processing_thread_requests.lock().await; - if processing_requests.remove(&block_hash) { - tracing::info!( - "📦 Received block {} requested by filter processing thread", - block_hash - ); - - // We don't have height information for processing thread requests, - // so we'll need to look it up - // Create a minimal FilterMatch to indicate this was a processing thread request - let filter_match = crate::types::FilterMatch { - block_hash, - height: 0, // Height unknown for processing thread requests - block_requested: true, - }; - - return Ok(Some(filter_match)); - } - } - - tracing::warn!("Received unexpected block: {}", block_hash); - Ok(None) - } -}
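For reference, the core of the BIP158 check performed by `filter_matches_scripts` above is only a few lines against dashcore's `bip158` module. A minimal sketch mirroring the calls used in the deleted helper — `watch_scripts` is an illustrative stand-in for whatever script set the wallet exposes, and errors are collapsed to "no match" rather than mapped to `SyncError`:

```rust
use dashcore::{bip158::BlockFilterReader, BlockHash, ScriptBuf};

/// Returns true if any watched script matches the given compact filter.
/// As in the deleted helper: the reader is keyed by the block hash per
/// BIP158, and `match_any` consumes the raw filter bytes as a reader.
fn filter_matches(filter_data: &[u8], block_hash: &BlockHash, watch_scripts: &[ScriptBuf]) -> bool {
    if filter_data.is_empty() || watch_scripts.is_empty() {
        return false;
    }
    let reader = BlockFilterReader::new(block_hash);
    let mut bytes = filter_data;
    reader
        .match_any(&mut bytes, watch_scripts.iter().map(|s| s.as_bytes()))
        .unwrap_or(false) // treat read/decode errors as "no match" in this sketch
}
```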
diff --git a/dash-spv/src/sync/legacy/filters/mod.rs b/dash-spv/src/sync/legacy/filters/mod.rs deleted file mode 100644 index abf2b2d2b..000000000 --- a/dash-spv/src/sync/legacy/filters/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! BIP157 Compact Block Filter synchronization. -//! -//! This module was refactored from a single 4,000+ line file into organized sub-modules. -//! -//! ## Module Organization -//! -//! - `types` - Type definitions and constants -//! - `manager` - Main FilterSyncManager coordination -//! - `headers` - CFHeaders synchronization -//! - `download` - CFilter download logic -//! - `matching` - Filter matching against wallet -//! - `retry` - Retry and timeout logic -//! - `stats` - Statistics and progress tracking -//! - `requests` - Request queue management -//! -//! ## Thread Safety -//! -//! Lock acquisition order (to prevent deadlocks): -//! 1. pending_requests -//! 2. active_requests -//! 3. received_heights - -pub mod download; -pub mod headers; -pub mod manager; -pub mod matching; -pub mod requests; -pub mod retry; -pub mod stats; -pub mod types; - -// Re-export main types -pub use manager::FilterSyncManager; -pub use types::{ - ActiveCFHeaderRequest, ActiveRequest, CFHeaderRequest, FilterNotificationSender, FilterRequest, - ReceivedCFHeaderBatch, -}; -pub use types::{ - DEFAULT_FILTER_SYNC_RANGE, FILTER_BATCH_SIZE, FILTER_REQUEST_BATCH_SIZE, FILTER_RETRY_DELAY_MS, - MAX_CONCURRENT_FILTER_REQUESTS, REQUEST_TIMEOUT_SECONDS, SYNC_TIMEOUT_SECONDS, -}; diff --git a/dash-spv/src/sync/legacy/filters/requests.rs b/dash-spv/src/sync/legacy/filters/requests.rs deleted file mode 100644 index af448f1bb..000000000 --- a/dash-spv/src/sync/legacy/filters/requests.rs +++ /dev/null @@ -1,223 +0,0 @@ -//! Request queue management and flow control. -//! -//! This module handles: -//! - Building request queues for CFHeaders and CFilters -//! - Processing queues with concurrency limits (flow control) -//! - Tracking active requests and managing completion -//! - Sending individual requests to the network - -use super::types::*; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; - -impl<S: StorageManager, N: NetworkManager> super::manager::FilterSyncManager<S, N> { - /// Build a queue of filter requests covering the specified range. - /// - /// If start_height is None, defaults to (filter_header_tip - DEFAULT_FILTER_SYNC_RANGE). - /// If count is None, syncs to filter_header_tip. - /// Splits the range into batches of FILTER_REQUEST_BATCH_SIZE. - pub(super) async fn build_filter_request_queue( - &mut self, - storage: &S, - start_height: Option<u32>, - count: Option<u32>, - ) -> SyncResult<()> { - // Clear any existing queue - self.pending_filter_requests.clear(); - - // Determine range to sync - // Note: get_filter_tip_height() returns the highest filter HEADER height, not filter height - let filter_header_tip_height = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter header tip: {}", e)))?
- .unwrap_or(0); - - let start = start_height - .unwrap_or_else(|| filter_header_tip_height.saturating_sub(DEFAULT_FILTER_SYNC_RANGE)); - - // Calculate the end height based on the requested count - // Do NOT cap at the current filter position - we want to sync UP TO the filter header tip - let end = if let Some(c) = count { - (start + c - 1).min(filter_header_tip_height) - } else { - filter_header_tip_height - }; - - let base_height = self.sync_base_height; - let clamped_start = start.max(base_height); - - if clamped_start > end { - tracing::warn!( - "⚠️ Filter sync requested from height {} but end height is {} - no filters to sync", - start, - end - ); - return Ok(()); - } - - tracing::info!( - "🔄 Building filter request queue from height {} to {} ({} blocks, filter headers available up to {})", - clamped_start, - end, - end - clamped_start + 1, - filter_header_tip_height - ); - - // Build requests in batches - let batch_size = FILTER_REQUEST_BATCH_SIZE; - let mut current_height = clamped_start; - - while current_height <= end { - let batch_end = (current_height + batch_size - 1).min(end); - - // Ensure the batch end height is within the stored header range - let stop_hash = storage - .get_header(batch_end) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get stop header at height {}: {}", - batch_end, e - )) - })? - .ok_or_else(|| { - SyncError::Storage(format!("Stop header not found at height {}", batch_end)) - })? - .block_hash(); - - // Create filter request and add to queue - let request = FilterRequest { - start_height: current_height, - end_height: batch_end, - stop_hash, - is_retry: false, - }; - - self.pending_filter_requests.push_back(request); - - tracing::debug!( - "Queued filter request for heights {} to {}", - current_height, - batch_end - ); - - current_height = batch_end + 1; - } - - tracing::info!( - "📋 Filter request queue built with {} batches", - self.pending_filter_requests.len() - ); - - // Log the first few batches for debugging - for (i, request) in self.pending_filter_requests.iter().take(3).enumerate() { - tracing::debug!( - " Batch {}: heights {}-{} (stop hash: {})", - i + 1, - request.start_height, - request.end_height, - request.stop_hash - ); - } - if self.pending_filter_requests.len() > 3 { - tracing::debug!(" ... and {} more batches", self.pending_filter_requests.len() - 3); - } - - Ok(()) - } - - /// Process the filter request queue. - /// - /// Sends an initial batch of requests up to MAX_CONCURRENT_FILTER_REQUESTS. - /// Additional requests are sent as active requests complete. - pub(super) async fn process_filter_request_queue( - &mut self, - network: &mut N, - _storage: &S, - ) -> SyncResult<()> { - // Send initial batch up to MAX_CONCURRENT_FILTER_REQUESTS - let initial_send_count = - MAX_CONCURRENT_FILTER_REQUESTS.min(self.pending_filter_requests.len()); - - for _ in 0..initial_send_count { - if let Some(request) = self.pending_filter_requests.pop_front() { - self.send_filter_request(network, request).await?; - } - } - - tracing::info!( - "🚀 Sent initial batch of {} filter requests ({} queued, {} active)", - initial_send_count, - self.pending_filter_requests.len(), - self.active_filter_requests.len() - ); - - Ok(()) - } - - /// Send a single filter request and track it as active. 
- pub(super) async fn send_filter_request( - &mut self, - network: &mut N, - request: FilterRequest, - ) -> SyncResult<()> { - // Send the actual network request - self.request_filters(network, request.start_height, request.stop_hash).await?; - - // Track this request as active - let range = (request.start_height, request.end_height); - let active_request = ActiveRequest { - sent_time: std::time::Instant::now(), - }; - - self.active_filter_requests.insert(range, active_request); - - tracing::debug!( - "📡 Sent filter request for range {}-{} (now {} active)", - request.start_height, - request.end_height, - self.active_filter_requests.len() - ); - - // Apply delay only for retry requests to avoid hammering peers - if request.is_retry && FILTER_RETRY_DELAY_MS > 0 { - tokio::time::sleep(tokio::time::Duration::from_millis(FILTER_RETRY_DELAY_MS)).await; - } - - Ok(()) - } - - /// Process next requests from the queue when active requests complete. - /// - /// Called after filter requests complete to send more from the queue. - pub async fn process_next_queued_requests(&mut self, network: &mut N) -> SyncResult<()> { - let available_slots = - MAX_CONCURRENT_FILTER_REQUESTS.saturating_sub(self.active_filter_requests.len()); - let mut sent_count = 0; - - for _ in 0..available_slots { - if let Some(request) = self.pending_filter_requests.pop_front() { - self.send_filter_request(network, request).await?; - sent_count += 1; - } else { - break; - } - } - - if sent_count > 0 { - tracing::debug!( - "🚀 Sent {} additional filter requests from queue ({} queued, {} active)", - sent_count, - self.pending_filter_requests.len(), - self.active_filter_requests.len() - ); - } - - Ok(()) - } -} diff --git a/dash-spv/src/sync/legacy/filters/retry.rs b/dash-spv/src/sync/legacy/filters/retry.rs deleted file mode 100644 index c0bd7fdbc..000000000 --- a/dash-spv/src/sync/legacy/filters/retry.rs +++ /dev/null @@ -1,334 +0,0 @@ -//! Timeout and retry logic for filter synchronization. -//! -//! This module handles: -//! - Detecting timed-out filter and CFHeader requests -//! - Retrying failed requests with exponential backoff -//! - Managing retry counts and giving up after max attempts -//! - Sync progress timeout detection - -use super::types::*; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use dashcore::BlockHash; - -impl<S: StorageManager, N: NetworkManager> super::manager::FilterSyncManager<S, N> { - /// Check if filter header sync has timed out (no progress for SYNC_TIMEOUT_SECONDS). - /// - /// If timeout is detected, attempts recovery by re-sending the current batch request.
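The headers, CFHeaders, and CFilter paths in these deleted modules all share one stall-detection idiom: a `std::time::Instant` refreshed on every useful message, compared against a timeout before re-sending the in-flight request. A self-contained sketch of that idiom, with names chosen here for illustration:

```rust
use std::time::{Duration, Instant};

/// Minimal stall detector, following the same pattern as the deleted
/// sync code: `mark_progress` is called whenever a useful message
/// arrives, and `is_stalled` gates the re-request / recovery path.
struct StallDetector {
    last_progress: Instant,
    timeout: Duration,
}

impl StallDetector {
    fn new(timeout: Duration) -> Self {
        Self { last_progress: Instant::now(), timeout }
    }

    /// Reset the clock after each accepted batch or response.
    fn mark_progress(&mut self) {
        self.last_progress = Instant::now();
    }

    /// True once no progress has been observed for the full timeout.
    fn is_stalled(&self) -> bool {
        self.last_progress.elapsed() > self.timeout
    }
}
```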
- pub async fn check_sync_timeout( - &mut self, - storage: &mut S, - network: &mut N, - ) -> SyncResult { - if !self.syncing_filter_headers { - return Ok(false); - } - - if self.last_sync_progress.elapsed() > std::time::Duration::from_secs(SYNC_TIMEOUT_SECONDS) - { - tracing::warn!( - "📊 No filter header sync progress for {}+ seconds, re-sending filter header request", - SYNC_TIMEOUT_SECONDS - ); - - // Get header tip height for recovery - let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; - - // Re-calculate current batch parameters for recovery - let recovery_batch_end_height = - (self.current_sync_height + FILTER_BATCH_SIZE - 1).min(header_tip_height); - let recovery_batch_stop_hash = if recovery_batch_end_height < header_tip_height { - // Try to get the header at the calculated height with backward scanning - match storage.get_header(recovery_batch_end_height).await { - Ok(Some(header)) => header.block_hash(), - Ok(None) => { - tracing::warn!( - "Recovery header not found at blockchain height {}, scanning backwards", - recovery_batch_end_height - ); - - let min_height = self.current_sync_height; - match self - .find_available_header_at_or_before( - recovery_batch_end_height.saturating_sub(1), - min_height, - storage, - ) - .await - { - Some((hash, height)) => { - if height < self.current_sync_height { - tracing::warn!( - "Recovery: Found header at height {} which is less than current sync height {}. This indicates we already have filter headers up to {}. Marking sync as complete.", - height, - self.current_sync_height, - self.current_sync_height - 1 - ); - self.syncing_filter_headers = false; - return Ok(false); - } - hash - } - None => { - tracing::error!( - "No headers available for recovery between {} and {}", - min_height, - recovery_batch_end_height - ); - return Err(SyncError::Storage( - "No headers available for recovery".to_string(), - )); - } - } - } - Err(e) => { - return Err(SyncError::Storage(format!( - "Failed to get recovery batch stop header at height {}: {}", - recovery_batch_end_height, e - ))); - } - } - } else { - // Special handling for chain tip: if we can't find the exact tip header, - // try the previous header as we might be at the actual chain tip - match storage.get_header(header_tip_height).await { - Ok(Some(header)) => header.block_hash(), - Ok(None) if header_tip_height > 0 => { - tracing::debug!( - "Tip header not found at blockchain height {} during recovery, trying previous header", - header_tip_height - ); - // Try previous header when at chain tip - storage - .get_header(header_tip_height - 1) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get previous header during recovery: {}", - e - )) - })? - .ok_or_else(|| { - SyncError::Storage(format!( - "Neither tip ({}) nor previous header found during recovery", - header_tip_height - )) - })? - .block_hash() - } - Ok(None) => { - return Err(SyncError::Validation(format!( - "Tip header not found at height {} (genesis) during recovery", - header_tip_height - ))); - } - Err(e) => { - return Err(SyncError::Validation(format!( - "Failed to get tip header during recovery: {}", - e - ))); - } - } - }; - - self.request_filter_headers( - network, - self.current_sync_height, - recovery_batch_stop_hash, - ) - .await?; - self.last_sync_progress = std::time::Instant::now(); - - return Ok(true); - } - - Ok(false) - } - - /// Check for timed out CFHeader requests and retry them. 
- /// - /// Called periodically to detect and recover from requests that never received responses. - pub async fn check_cfheader_request_timeouts( - &mut self, - network: &mut N, - storage: &S, - ) -> SyncResult<()> { - if !self.syncing_filter_headers { - return Ok(()); - } - - let now = std::time::Instant::now(); - let mut timed_out_requests = Vec::new(); - - // Check for timed out active requests - for (start_height, active_req) in &self.active_cfheader_requests { - if now.duration_since(active_req.sent_time) > self.cfheader_request_timeout { - timed_out_requests.push((*start_height, active_req.stop_hash)); - } - } - - // Handle timeouts: remove from active, retry or give up based on retry count - for (start_height, stop_hash) in timed_out_requests { - self.handle_cfheader_request_timeout(start_height, stop_hash, network, storage).await?; - } - - // Check queue status and send next batch if needed - self.process_next_queued_cfheader_requests(network).await?; - - Ok(()) - } - - /// Handle a specific CFHeaders request timeout. - async fn handle_cfheader_request_timeout( - &mut self, - start_height: u32, - stop_hash: BlockHash, - _network: &mut N, - _storage: &S, - ) -> SyncResult<()> { - let retry_count = self.cfheader_retry_counts.get(&start_height).copied().unwrap_or(0); - - // Remove from active requests - self.active_cfheader_requests.remove(&start_height); - - if retry_count >= self.max_cfheader_retries { - tracing::error!( - "❌ CFHeaders request for height {} failed after {} retries, giving up", - start_height, - retry_count - ); - return Ok(()); - } - - tracing::info!( - "🔄 Retrying timed out CFHeaders request for height {} (attempt {}/{})", - start_height, - retry_count + 1, - self.max_cfheader_retries - ); - - // Create new request and add back to queue for retry - let retry_request = CFHeaderRequest { - start_height, - stop_hash, - is_retry: true, - }; - - // Update retry count - self.cfheader_retry_counts.insert(start_height, retry_count + 1); - - // Add to front of queue for priority retry - self.pending_cfheader_requests.push_front(retry_request); - - Ok(()) - } - - /// Check for timed out filter requests and retry them. - pub async fn check_filter_request_timeouts( - &mut self, - network: &mut N, - storage: &S, - ) -> SyncResult<()> { - let now = std::time::Instant::now(); - let timeout_duration = std::time::Duration::from_secs(REQUEST_TIMEOUT_SECONDS); - - // Check for timed out active requests - let mut timed_out_requests = Vec::new(); - for ((start, end), active_req) in &self.active_filter_requests { - if now.duration_since(active_req.sent_time) > timeout_duration { - timed_out_requests.push((*start, *end)); - } - } - - // Handle timeouts: remove from active, retry or give up based on retry count - for range in timed_out_requests { - self.handle_request_timeout(range, network, storage).await?; - } - - // Check queue status and send next batch if needed - self.process_next_queued_requests(network).await?; - - Ok(()) - } - - /// Handle a specific filter request timeout. 
- async fn handle_request_timeout( - &mut self, - range: (u32, u32), - _network: &mut N, - storage: &S, - ) -> SyncResult<()> { - let (start, end) = range; - let retry_count = self.filter_retry_counts.get(&range).copied().unwrap_or(0); - - // Remove from active requests - self.active_filter_requests.remove(&range); - - if retry_count >= self.max_filter_retries { - tracing::error!( - "❌ Filter range {}-{} failed after {} retries, giving up", - start, - end, - retry_count - ); - return Ok(()); - } - - // Calculate stop hash for retry; ensure height is within the stored window - if self.header_abs_to_storage_index(end).is_none() { - tracing::debug!( - "Skipping retry for range {}-{} because end is below checkpoint base {}", - start, - end, - self.sync_base_height - ); - return Ok(()); - } - - match storage.get_header(end).await { - Ok(Some(header)) => { - let stop_hash = header.block_hash(); - - tracing::info!( - "🔄 Retrying timed out filter range {}-{} (attempt {}/{})", - start, - end, - retry_count + 1, - self.max_filter_retries - ); - - // Create new request and add back to queue for retry - let retry_request = FilterRequest { - start_height: start, - end_height: end, - stop_hash, - is_retry: true, - }; - - // Update retry count - self.filter_retry_counts.insert(range, retry_count + 1); - - // Add to front of queue for priority retry - self.pending_filter_requests.push_front(retry_request); - - Ok(()) - } - Ok(None) => { - tracing::error!( - "Cannot retry filter range {}-{}: header not found at height {}", - start, - end, - end - ); - Ok(()) - } - Err(e) => { - tracing::error!("Failed to get header at height {} for retry: {}", end, e); - Ok(()) - } - } - } -} diff --git a/dash-spv/src/sync/legacy/filters/stats.rs b/dash-spv/src/sync/legacy/filters/stats.rs deleted file mode 100644 index a51e4249a..000000000 --- a/dash-spv/src/sync/legacy/filters/stats.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Statistics and progress tracking for filter synchronization. - -use super::types::*; -use crate::network::NetworkManager; -use crate::storage::StorageManager; - -impl super::manager::FilterSyncManager { - /// Get state (pending count, active count). - pub fn get_filter_sync_state(&self) -> (usize, usize) { - (self.pending_filter_requests.len(), self.active_filter_requests.len()) - } - - /// Get number of available request slots. - pub fn get_available_request_slots(&self) -> usize { - MAX_CONCURRENT_FILTER_REQUESTS.saturating_sub(self.active_filter_requests.len()) - } - - /// Get the total number of filters received. - pub fn get_received_filter_count(&self) -> u32 { - match self.received_filter_heights.try_lock() { - Ok(heights) => heights.len() as u32, - Err(_) => 0, - } - } -} diff --git a/dash-spv/src/sync/legacy/filters/types.rs b/dash-spv/src/sync/legacy/filters/types.rs deleted file mode 100644 index 109926467..000000000 --- a/dash-spv/src/sync/legacy/filters/types.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! Types and constants for filter synchronization. - -use dashcore::network::message_filter::CFHeaders; -use dashcore::BlockHash; -use std::time::Instant; -use tokio::sync::mpsc; - -// ============================================================================ -// Constants -// ============================================================================ - -/// Maximum size of a single CFHeaders request batch. -/// Stay under Dash Core's 2000 limit. Using 1999 helps reduce accidental overlaps. -pub const FILTER_BATCH_SIZE: u32 = 1999; - -/// Timeout for overall filter sync operations (seconds). 
-pub const SYNC_TIMEOUT_SECONDS: u64 = 5; - -/// Default range for filter synchronization. -pub const DEFAULT_FILTER_SYNC_RANGE: u32 = 100; - -/// Batch size for compact filter requests (CFilters). -pub const FILTER_REQUEST_BATCH_SIZE: u32 = 1000; - -/// Maximum concurrent filter batches allowed. -pub const MAX_CONCURRENT_FILTER_REQUESTS: usize = 50; - -/// Delay before retrying filter requests (milliseconds). -pub const FILTER_RETRY_DELAY_MS: u64 = 100; - -/// Timeout for individual filter requests (seconds). -pub const REQUEST_TIMEOUT_SECONDS: u64 = 30; - -// ============================================================================ -// Type Aliases -// ============================================================================ - -/// Handle for sending CFilter messages to the processing thread. -pub type FilterNotificationSender = - mpsc::UnboundedSender<dashcore::network::message_filter::CFilter>; - -// ============================================================================ -// Request Types -// ============================================================================ - -/// Represents a filter request to be sent or queued. -#[derive(Debug, Clone)] -pub struct FilterRequest { - pub start_height: u32, - pub end_height: u32, - pub stop_hash: BlockHash, - pub is_retry: bool, -} - -/// Represents an active filter request that has been sent and is awaiting response. -#[derive(Debug)] -pub struct ActiveRequest { - pub sent_time: Instant, -} - -/// Represents a CFHeaders request to be sent or queued. -#[derive(Debug, Clone)] -pub struct CFHeaderRequest { - pub start_height: u32, - pub stop_hash: BlockHash, - #[allow(dead_code)] - pub is_retry: bool, -} - -/// Represents an active CFHeaders request that has been sent and is awaiting response. -#[derive(Debug)] -pub struct ActiveCFHeaderRequest { - pub sent_time: Instant, - pub stop_hash: BlockHash, -} - -/// Represents a received CFHeaders batch waiting for sequential processing. -#[derive(Debug)] -pub struct ReceivedCFHeaderBatch { - pub cfheaders: CFHeaders, - #[allow(dead_code)] - pub received_at: Instant, -}
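The queue builders shown earlier turn a height range into fixed-size batches using these constants; the arithmetic, isolated from storage and stop-hash lookups, looks like the following sketch (mirroring the loop in `build_filter_request_queue`):

```rust
/// Batch size for compact filter requests, as defined above.
const FILTER_REQUEST_BATCH_SIZE: u32 = 1000;

/// Split an inclusive height range into (start, end) batches.
/// Stop hashes are omitted here; the real code looks up the header
/// at each batch end to fill in `FilterRequest::stop_hash`.
fn batch_ranges(start: u32, end: u32) -> Vec<(u32, u32)> {
    let mut ranges = Vec::new();
    let mut current = start;
    while current <= end {
        // Same clamping as the deleted code: never run past `end`.
        let batch_end = (current + FILTER_REQUEST_BATCH_SIZE - 1).min(end);
        ranges.push((current, batch_end));
        current = batch_end + 1;
    }
    ranges
}

// batch_ranges(0, 2500) == [(0, 999), (1000, 1999), (2000, 2500)]
```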
diff --git a/dash-spv/src/sync/legacy/headers/manager.rs b/dash-spv/src/sync/legacy/headers/manager.rs deleted file mode 100644 index 4498c16d9..000000000 --- a/dash-spv/src/sync/legacy/headers/manager.rs +++ /dev/null @@ -1,723 +0,0 @@ -//! Header synchronization with fork detection and reorganization handling. - -use dashcore::{ - block::Header as BlockHeader, network::constants::NetworkExt, network::message::NetworkMessage, - network::message_blockdata::GetHeadersMessage, BlockHash, -}; -use dashcore_hashes::Hash; - -use crate::chain::checkpoints::{mainnet_checkpoints, testnet_checkpoints, CheckpointManager}; -use crate::chain::{ChainTip, ChainTipManager, ChainWork}; -use crate::client::ClientConfig; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use crate::types::{ChainState, HashedBlockHeader}; -use crate::validation::{BlockHeaderValidator, Validator}; -use crate::ValidationMode; -use std::sync::Arc; -use tokio::sync::RwLock; - -/// Configuration for reorg handling -pub struct ReorgConfig { - /// Maximum depth of reorganization to handle - pub max_reorg_depth: u32, - /// Whether to respect chain locks - pub respect_chain_locks: bool, - /// Maximum number of forks to track - pub max_forks: usize, - /// Whether to enforce checkpoint validation - pub enforce_checkpoints: bool, -} - -impl Default for ReorgConfig { - fn default() -> Self { - Self { - max_reorg_depth: 1000, - respect_chain_locks: true, - max_forks: 10, - enforce_checkpoints: true, - } - } -} - -/// Manages header synchronization with fork detection and reorganization support -pub struct HeaderSyncManager<S: StorageManager, N: NetworkManager> { - _phantom_s: std::marker::PhantomData<S>, - _phantom_n: std::marker::PhantomData<N>, - config: ClientConfig, - tip_manager: ChainTipManager, - checkpoint_manager: CheckpointManager, - reorg_config: ReorgConfig, - chain_state: Arc<RwLock<ChainState>>, - syncing_headers: bool, - last_sync_progress: std::time::Instant, - // Cached flag for quick access without locking - cached_sync_base_height: u32, -} - -impl<S: StorageManager, N: NetworkManager> HeaderSyncManager<S, N> { - /// Create a new header sync manager - pub fn new( - config: &ClientConfig, - reorg_config: ReorgConfig, - chain_state: Arc<RwLock<ChainState>>, - ) -> SyncResult<Self> { - // WalletState removed - wallet functionality is now handled externally - - // Create checkpoint manager based on network - let checkpoints = match config.network { - dashcore::Network::Dash => mainnet_checkpoints(), - dashcore::Network::Testnet => testnet_checkpoints(), - _ => Vec::new(), // No checkpoints for other networks - }; - let checkpoint_manager = CheckpointManager::new(checkpoints); - - Ok(Self { - config: config.clone(), - tip_manager: ChainTipManager::new(reorg_config.max_forks), - checkpoint_manager, - reorg_config, - chain_state, - syncing_headers: false, - last_sync_progress: std::time::Instant::now(), - cached_sync_base_height: 0, - _phantom_s: std::marker::PhantomData, - _phantom_n: std::marker::PhantomData, - }) - } - - /// Load headers from storage into the chain state - pub async fn load_headers_from_storage(&mut self, storage: &S) { - // First, try to load the persisted chain state which may contain sync_base_height - if let Ok(Some(stored_chain_state)) = storage.load_chain_state().await { - tracing::info!( - "Loaded chain state from storage with sync_base_height: {}", - stored_chain_state.sync_base_height, - ); - // Update our chain state with the loaded one - { - self.cached_sync_base_height = stored_chain_state.sync_base_height; - let mut cs = self.chain_state.write().await; - *cs = stored_chain_state; - } - } - } - - /// Handle a Headers message - pub async fn handle_headers_message( - &mut self, - headers: &[BlockHeader], - storage: &mut S, - network: &mut N, - ) -> SyncResult<bool> { - tracing::info!("🔍 Handle headers message with {} headers", headers.len()); - - // Step 1: Handle Empty
Batch - if headers.is_empty() { - tracing::info!( - "📊 Header sync complete - no more headers from peers. Total headers synced: {}, chain_state.tip_height: {}", - storage.get_stored_headers_len().await, - storage.get_tip_height().await.unwrap_or(0), - ); - self.syncing_headers = false; - return Ok(false); - } - - // Wrap headers in CachedHeader to avoid redundant X11 hashing - // This prevents recomputing hashes during validation, logging, and storage - let cached_headers: Vec<_> = headers.iter().map(HashedBlockHeader::from).collect(); - - // Step 2: Validate Batch - let first_cached = &cached_headers[0]; - let first_header = first_cached.header(); - - let tip_height = storage - .get_tip_height() - .await - .ok_or_else(|| SyncError::InvalidState("No tip height in storage".to_string()))?; - - let tip = storage - .get_header(tip_height) - .await - .ok() - .flatten() - .ok_or_else(|| SyncError::InvalidState("No tip header in storage".to_string()))?; - - // Check if the first header connects to our tip - // Cache tip hash to avoid recomputing it - let tip_cached = HashedBlockHeader::from(tip); - let tip_hash = tip_cached.hash(); - - if first_header.prev_blockhash != *tip_hash { - tracing::warn!( - "Received header batch that does not connect to our tip. Expected prev_hash: {}, got: {}. Dropping message.", - tip_hash, - first_header.prev_blockhash - ); - // Gracefully drop the message and let timeout mechanism handle re-requesting - return Ok(true); - } - - // Special handling for checkpoint sync validation - if self.is_synced_from_checkpoint() && !headers.is_empty() { - // Check if this might be a genesis or very early block - let is_genesis = first_header.prev_blockhash == BlockHash::from_byte_array([0; 32]); - let is_early_block = - first_header.bits.to_consensus() == 0x1e0ffff0 || first_header.time < 1400000000; - - if is_genesis || is_early_block { - tracing::error!( - "CHECKPOINT SYNC FAILED: Peer sent headers from genesis instead of connecting to checkpoint at height {}. 
\ - This indicates the checkpoint may not be valid for this network or the peer doesn't have it.", - self.get_sync_base_height() - ); - return Err(SyncError::InvalidState(format!( - "Checkpoint sync failed: peer doesn't recognize checkpoint at height {}", - self.get_sync_base_height() - ))); - } - } - - if self.config.validation_mode != ValidationMode::None { - BlockHeaderValidator::new().validate(&cached_headers).map_err(|e| { - let error = format!("Header validation failed: {}", e); - tracing::error!(error); - SyncError::Validation(error) - })?; - } - - self.last_sync_progress = std::time::Instant::now(); - - // Log details about the batch for debugging - if !cached_headers.is_empty() { - let last_cached = cached_headers.last().unwrap(); - // Use cached hashes to avoid redundant X11 computation - let first_hash = first_cached.hash(); - let last_hash = last_cached.hash(); - tracing::debug!( - "Received headers batch: first.prev_hash={}, first.hash={}, last.hash={}, count={}", - first_header.prev_blockhash, - first_hash, - last_hash, - cached_headers.len() - ); - } - - // Step 3: Process the Entire Validated Batch - - // Checkpoint Validation: Perform in-memory security check against checkpoints - for (index, cached_header) in cached_headers.iter().enumerate() { - let prospective_height = tip_height + (index as u32) + 1; - - if self.reorg_config.enforce_checkpoints { - // Use cached hash to avoid redundant X11 computation in loop - let header_hash = cached_header.hash(); - if !self.checkpoint_manager.validate_block(prospective_height, header_hash) { - return Err(SyncError::Validation(format!( - "Block at height {} does not match checkpoint", - prospective_height - ))); - } - } - } - - storage - .store_headers(headers) - .await - .map_err(|e| SyncError::Storage(format!("Failed to store headers batch: {}", e)))?; - - tracing::info!( - "Header sync progress: processed {} headers in batch, total_headers_synced: {}", - headers.len() as u32, - storage.get_stored_headers_len().await, - ); - - // Update chain tip manager with the last header in the batch - if let Some(last_header) = headers.last() { - let final_height = storage.get_tip_height().await.unwrap_or(0); - let chain_work = ChainWork::from_height_and_header(final_height, last_header); - let tip = ChainTip::new(*last_header, final_height, chain_work); - self.tip_manager - .add_tip(tip) - .map_err(|e| SyncError::Storage(format!("Failed to update tip: {}", e)))?; - } - - // Note: Fork detection is temporarily disabled for batch processing - // In a production implementation, we would need to handle fork detection - // at the batch level or in a separate phase - - if self.syncing_headers { - // During sync mode - request next batch - // Use the last cached header's hash to avoid redundant X11 computation - if let Some(last_cached) = cached_headers.last() { - let hash = last_cached.hash(); - self.request_headers(network, Some(*hash), storage).await?; - } - } - - Ok(true) - } - - /// Request headers from the network - pub async fn request_headers( - &mut self, - network: &mut N, - base_hash: Option, - storage: &S, - ) -> SyncResult<()> { - let block_locator = match base_hash { - Some(hash) => vec![hash], - None => { - // Check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { - let first_height = storage - .get_start_height() - .await - .ok_or(SyncError::Storage("Failed to get start height".to_string()))?; - let checkpoint_header = storage - .get_header(first_height) - 
.await - .map_err(|e| { - SyncError::Storage(format!("Failed to get first header: {}", e)) - })? - .ok_or(SyncError::Storage( - "Storage didn't return first header".to_string(), - ))?; - - // Use the checkpoint hash from chain state - let checkpoint_hash = checkpoint_header.block_hash(); - tracing::info!( - "📍 No base_hash provided but syncing from checkpoint at height {}. Using checkpoint hash: {}", - self.get_sync_base_height(), - checkpoint_hash - ); - vec![checkpoint_hash] - } else { - // Normal sync from genesis - let genesis_hash = self - .config - .network - .known_genesis_block_hash() - .unwrap_or(BlockHash::from_byte_array([0; 32])); - vec![genesis_hash] - } - } - }; - - let stop_hash = BlockHash::from_byte_array([0; 32]); - let getheaders_msg = GetHeadersMessage::new(block_locator.clone(), stop_hash); - - // Log the GetHeaders message details - tracing::info!( - "GetHeaders message - version: {}, locator_count: {}, locator: {:?}, stop_hash: {:?}", - getheaders_msg.version, - getheaders_msg.locator_hashes.len(), - getheaders_msg.locator_hashes, - getheaders_msg.stop_hash - ); - - // Log details about the request - tracing::info!( - "Preparing headers request - height: {}, base_hash: {:?}", - storage.get_tip_height().await.unwrap_or(0), - base_hash - ); - - tracing::debug!("Sending GetHeaders message"); - network - .send_message(NetworkMessage::GetHeaders(getheaders_msg)) - .await - .map_err(|e| SyncError::Network(format!("Failed to send GetHeaders: {}", e)))?; - - Ok(()) - } - - /// Prepare sync state without sending network requests. - /// This allows monitoring to be set up before requests are sent. - pub async fn prepare_sync(&mut self, storage: &mut S) -> SyncResult> { - if self.syncing_headers { - return Err(SyncError::SyncInProgress); - } - - tracing::info!("Preparing header synchronization"); - tracing::info!( - "Chain state before prepare_sync: sync_base_height={}, headers_count={}", - self.get_sync_base_height(), - storage.get_stored_headers_len().await - ); - - // Get current tip from storage - let current_tip_height = storage.get_tip_height().await; - - // If we're syncing from a checkpoint, we need to account for sync_base_height - let effective_tip_height = if self.is_synced_from_checkpoint() { - if let Some(tip_height) = current_tip_height { - tracing::info!( - "Syncing from checkpoint: sync_base_height={}, tip_height={}", - self.get_sync_base_height(), - tip_height - ); - Some(tip_height) - } else { - None - } - } else { - tracing::info!( - "Not syncing from checkpoint or no tip height. sync_base_height={}, current_tip_height={:?}", - self.get_sync_base_height(), - current_tip_height - ); - current_tip_height - }; - - // We're syncing from a checkpoint and have the checkpoint header - let first_height = storage - .get_start_height() - .await - .ok_or(SyncError::Storage("Failed to get start height".to_string()))?; - let checkpoint_header = storage - .get_header(first_height) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get first header: {}", e)))? - .ok_or(SyncError::Storage("Storage didn't return first header".to_string()))?; - - let base_hash = match effective_tip_height { - None => { - // No headers in storage - check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { - let checkpoint_hash = checkpoint_header.block_hash(); - tracing::info!( - "No headers in storage but syncing from checkpoint at height {}. 
Using checkpoint hash: {}", - self.get_sync_base_height(), - checkpoint_hash - ); - Some(checkpoint_hash) - } else { - // Normal sync from genesis - tracing::info!("No tip height found, ensuring genesis block is stored"); - - // Get genesis header from chain state (which was initialized with genesis) - if let Some(genesis_header) = storage.get_header(0).await.map_err(|e| { - SyncError::Storage(format!( - "Error trying to get genesis block from storage: {}", - e - )) - })? { - // Store genesis in storage if not already there - if storage - .get_header(0) - .await - .map_err(|e| { - SyncError::Storage(format!("Failed to check genesis: {}", e)) - })? - .is_none() - { - tracing::info!("Storing genesis block in storage"); - storage.store_headers(&[genesis_header]).await.map_err(|e| { - SyncError::Storage(format!("Failed to store genesis: {}", e)) - })?; - } - - let genesis_hash = genesis_header.block_hash(); - tracing::info!("Starting from genesis block: {}", genesis_hash); - Some(genesis_hash) - } else { - // Check if we can start from a checkpoint - if let Some((height, hash)) = self.get_sync_starting_point() { - tracing::info!("Starting from checkpoint at height {}", height); - Some(hash) - } else { - // Use network genesis as fallback - let genesis_hash = - self.config.network.known_genesis_block_hash().ok_or_else( - || SyncError::Storage("No known genesis hash".to_string()), - )?; - tracing::info!("Starting from network genesis: {}", genesis_hash); - Some(genesis_hash) - } - } - } - } - Some(height) => { - tracing::info!("Current effective tip height: {}", height); - - // When syncing from a checkpoint, we need to use the checkpoint hash directly - // if we only have the checkpoint header stored - if self.is_synced_from_checkpoint() && height == self.get_sync_base_height() { - // We're at the checkpoint height - use the checkpoint hash from chain state - tracing::info!( - "At checkpoint height {}. Chain state has {} headers", - height, - storage.get_stored_headers_len().await - ); - - // The checkpoint header should be the first (and possibly only) header - if storage.get_stored_headers_len().await > 0 { - let hash = checkpoint_header.block_hash(); - tracing::info!("Using checkpoint hash for height {}: {}", height, hash); - Some(hash) - } else { - tracing::error!("Synced from checkpoint but no headers in chain state!"); - None - } - } else { - // Get the current tip hash from storage - let tip_header = storage.get_header(height).await.map_err(|e| { - SyncError::Storage(format!( - "Failed to get tip header at height {}: {}", - height, e - )) - })?; - let hash = tip_header.map(|h| h.block_hash()); - tracing::info!("Current tip hash at height {}: {:?}", height, hash); - hash - } - } - }; - - // Set sync state but don't send requests yet - self.syncing_headers = true; - self.last_sync_progress = std::time::Instant::now(); - tracing::info!( - "✅ Prepared header sync state, ready to request headers from {:?}", - base_hash - ); - - Ok(base_hash) - } - - /// Start synchronizing headers (initialize the sync state). 
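Both `prepare_sync` and the timeout-recovery path funnel into `request_headers`, and the message shape is simple: a single-entry block locator (the current tip, or the checkpoint hash when bootstrapping from one) plus an all-zero stop hash, which asks the peer for as many headers as it will return in one batch. A condensed sketch using the same dashcore calls as the deleted code:

```rust
use dashcore::{network::message_blockdata::GetHeadersMessage, BlockHash};
use dashcore_hashes::Hash;

/// Build the GetHeaders request the deleted code sends: one locator hash
/// and a zeroed stop hash ("give me everything you have past this point").
fn build_getheaders(base: BlockHash) -> GetHeadersMessage {
    let stop_hash = BlockHash::from_byte_array([0; 32]);
    GetHeadersMessage::new(vec![base], stop_hash)
}
```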
- pub async fn start_sync(&mut self, network: &mut N, storage: &mut S) -> SyncResult { - tracing::info!("Starting header synchronization"); - - // Prepare sync state (this will check if sync is already in progress) - let base_hash = self.prepare_sync(storage).await?; - - // Request headers starting from our current tip or checkpoint - self.request_headers(network, base_hash, storage).await?; - - Ok(true) // Sync started - } - - /// Check if a sync timeout has occurred and handle recovery. - pub async fn check_sync_timeout( - &mut self, - storage: &mut S, - network: &mut N, - ) -> SyncResult { - if !self.syncing_headers { - return Ok(false); - } - - let timeout_duration = if network.peer_count() == 0 { - // More aggressive timeout when no peers - std::time::Duration::from_secs(10) - } else { - std::time::Duration::from_secs(5) - }; - - if self.last_sync_progress.elapsed() > timeout_duration { - if network.peer_count() == 0 { - tracing::warn!("📊 Header sync stalled - no connected peers"); - self.syncing_headers = false; // Reset state to allow restart - return Err(SyncError::Network("No connected peers for header sync".to_string())); - } - - tracing::warn!( - "📊 No header sync progress for {}+ seconds, re-sending header request", - timeout_duration.as_secs() - ); - - // Get current tip for recovery - let current_tip_height = storage.get_tip_height().await; - - let first_height = storage - .get_start_height() - .await - .ok_or(SyncError::Storage("Failed to get start height".to_string()))?; - let checkpoint_header = storage - .get_header(first_height) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get first header: {}", e)))? - .ok_or(SyncError::Storage("Storage didn't return first header".to_string()))?; - - let recovery_base_hash = match current_tip_height { - None => { - // No headers in storage - check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() { - // Use the checkpoint hash from chain state - if storage.get_stored_headers_len().await > 0 { - let checkpoint_hash = checkpoint_header.block_hash(); - tracing::info!( - "Using checkpoint hash for recovery: {} (chain state has {} headers, first header time: {})", - checkpoint_hash, - storage.get_stored_headers_len().await, - checkpoint_header.time - ); - Some(checkpoint_hash) - } else { - tracing::warn!("No checkpoint header in chain state for recovery"); - None - } - } else { - None // Genesis - } - } - Some(height) => { - // When syncing from checkpoint, adjust the storage height - let storage_height = height; - - // Get the current tip hash - storage - .get_header(storage_height) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get tip header for recovery at height {}: {}", - storage_height, e - )) - })? 
- .map(|h| h.block_hash()) - } - }; - - self.request_headers(network, recovery_base_hash, storage).await?; - self.last_sync_progress = std::time::Instant::now(); - - return Ok(true); - } - - Ok(false) - } - - /// Get the optimal starting point for sync based on checkpoints - pub fn get_sync_starting_point(&self) -> Option<(u32, BlockHash)> { - // For now, we can't check storage here without passing it as parameter - // The actual implementation would need to check if headers exist in storage - // before deciding to use checkpoints - - // No headers in storage, use checkpoint based on wallet creation time - // TODO: Pass wallet creation time from client config - if let Some(checkpoint) = self.checkpoint_manager.get_sync_checkpoint(None) { - // Return checkpoint as starting point - // Note: We'll need to prepopulate headers from checkpoints for this to work properly - return Some((checkpoint.height, checkpoint.block_hash)); - } - - // No suitable checkpoint, start from genesis - None - } - - /// Check if we can skip ahead to a checkpoint during sync - pub fn can_skip_to_checkpoint( - &self, - current_height: u32, - peer_height: u32, - ) -> Option<(u32, BlockHash)> { - // Don't skip if we're already close to the peer's tip - if peer_height.saturating_sub(current_height) < 1000 { - return None; - } - - // Find next checkpoint after current height - let checkpoint_heights = self.checkpoint_manager.checkpoint_heights(); - - for height in checkpoint_heights { - // Skip if checkpoint is: - // 1. After our current position - // 2. Before or at peer's height (peer has it) - // 3. Far enough ahead to be worth skipping (at least 500 blocks) - if *height > current_height && *height <= peer_height && *height > current_height + 500 - { - if let Some(checkpoint) = self.checkpoint_manager.get_checkpoint(*height) { - tracing::info!( - "Can skip from height {} to checkpoint at height {}", - current_height, - checkpoint.height - ); - return Some((checkpoint.height, checkpoint.block_hash)); - } - } - } - None - } - - /// Check if header sync is currently in progress - pub fn is_syncing(&self) -> bool { - self.syncing_headers - } - - /// Download a single header by hash - pub async fn download_single_header( - &mut self, - block_hash: BlockHash, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - // Check if we already have this header using the efficient reverse index - if let Some(height) = storage - .get_header_height_by_hash(&block_hash) - .await - .map_err(|e| SyncError::Storage(format!("Failed to check header existence: {}", e)))? - { - tracing::debug!("Header for block {} already exists at height {}", block_hash, height); - return Ok(()); - } - - tracing::info!("📥 Requesting header for block {}", block_hash); - - // Get current tip hash to use as locator - let current_tip = if let Some(tip_height) = storage.get_tip_height().await { - storage - .get_header(tip_height) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip header: {}", e)))? - .map(|h| h.block_hash()) - .ok_or_else(|| SyncError::MissingDependency("no tip header found".to_string()))? - } else { - self.config.network.known_genesis_block_hash().ok_or_else(|| { - SyncError::MissingDependency("no genesis block hash for network".to_string()) - })? 
- }; - - // Create GetHeaders message with specific stop hash - let getheaders = GetHeadersMessage::new(vec![current_tip], block_hash); - - network - .send_message(NetworkMessage::GetHeaders(getheaders)) - .await - .map_err(|e| SyncError::Network(format!("Failed to send GetHeaders: {}", e)))?; - - Ok(()) - } - - /// Reset any pending requests after restart. - pub fn reset_pending_requests(&mut self) -> SyncResult<()> { - // Reset sync state - self.syncing_headers = false; - self.last_sync_progress = std::time::Instant::now(); - tracing::debug!("Reset header sync pending requests"); - Ok(()) - } - - /// Get the current chain height - pub async fn get_chain_height(&self, storage: &S) -> u32 { - storage.get_tip_height().await.unwrap_or(0) - } - - /// Get the sync base height (used when syncing from checkpoint) - pub fn get_sync_base_height(&self) -> u32 { - self.cached_sync_base_height - } - - /// Whether we're syncing from a checkpoint - pub fn is_synced_from_checkpoint(&self) -> bool { - self.cached_sync_base_height > 0 - } - - /// Update cached flags and totals based on an external state snapshot - pub fn update_cached_from_state_snapshot(&mut self, sync_base_height: u32) { - self.cached_sync_base_height = sync_base_height; - } -} diff --git a/dash-spv/src/sync/legacy/headers/mod.rs b/dash-spv/src/sync/legacy/headers/mod.rs deleted file mode 100644 index 830ce7958..000000000 --- a/dash-spv/src/sync/legacy/headers/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Header synchronization with fork detection and reorganization handling. - -mod manager; - -pub use manager::{HeaderSyncManager, ReorgConfig}; diff --git a/dash-spv/src/sync/legacy/manager.rs b/dash-spv/src/sync/legacy/manager.rs deleted file mode 100644 index a2e64fbf0..000000000 --- a/dash-spv/src/sync/legacy/manager.rs +++ /dev/null @@ -1,369 +0,0 @@ -//! Core SyncManager struct and simple accessor methods. - -use super::phases::{PhaseTransition, SyncPhase}; -use super::transitions::TransitionManager; -use crate::client::ClientConfig; -use crate::error::SyncResult; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use crate::sync::legacy::{ - FilterSyncManager, HeaderSyncManager, MasternodeSyncManager, ReorgConfig, -}; -use crate::types::{SharedFilterHeights, SyncProgress}; -use crate::SyncError; -use dashcore::prelude::CoreBlockHeight; -use dashcore::BlockHash; -use key_wallet_manager::wallet_interface::WalletInterface; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tokio::sync::RwLock; - -/// Number of blocks back from a ChainLock's block height where we need the masternode list -/// for validation. ChainLock signatures are created by the masternode quorum that existed -/// 8 blocks before the ChainLock's block. -pub(super) const CHAINLOCK_VALIDATION_MASTERNODE_OFFSET: u32 = 8; - -/// Manages sequential synchronization of all blockchain data types. -/// -/// # Generic Parameters -/// -/// This manager uses generic trait parameters for the same reasons as [`DashSpvClient`]: -/// -/// - `S: StorageManager` - Allows swapping between persistent disk storage and in-memory storage for tests -/// - `N: NetworkManager` - Enables testing with mock network without network I/O -/// - `W: WalletInterface` - Supports custom wallet implementations and test wallets -/// -/// ## Why Generics Are Essential Here -/// -/// ### 1. **Testing Synchronization Logic** 🧪 -/// The sync manager coordinates complex blockchain synchronization across multiple phases. 
-/// Testing this logic requires: -/// - Mock network that doesn't make real connections -/// - Memory storage that doesn't touch the filesystem -/// - Test wallet that doesn't require real keys -/// -/// Generics allow these test implementations to be first-class types, not runtime hacks. -/// -/// ### 2. **Performance** ⚡ -/// Synchronization is performance-critical - we process thousands of headers and filters. -/// Generic monomorphization allows the compiler to: -/// - Inline storage operations -/// - Eliminate vtable overhead -/// - Optimize across trait boundaries -/// -/// ### 3. **Delegation Pattern** 🔗 -/// The sync manager delegates to specialized sub-managers (`HeaderSyncManager`, -/// `FilterSyncManager`, `MasternodeSyncManager`), each also generic over `S` and `N`. -/// This maintains type consistency throughout the sync pipeline. -/// -/// ### 4. **Zero Runtime Cost** 📦 -/// Despite being generic, production builds contain only one instantiation because -/// test-only storage/network types are behind `#[cfg(test)]`. -/// -/// The generic design enables comprehensive testing while maintaining zero-cost abstraction. -/// -/// [`DashSpvClient`]: crate::client::DashSpvClient -pub struct SyncManager<S: StorageManager, N: NetworkManager, W: WalletInterface> { - pub(super) _phantom_s: std::marker::PhantomData<S>, - pub(super) _phantom_n: std::marker::PhantomData<N>, - /// Current synchronization phase - pub(super) current_phase: SyncPhase, - - /// Phase transition manager - pub(super) transition_manager: TransitionManager, - - /// Existing sync managers (wrapped and controlled) - pub(super) header_sync: HeaderSyncManager<S, N>, - pub(super) filter_sync: FilterSyncManager<S, N>, - pub(super) masternode_sync: MasternodeSyncManager<S, N>, - - /// Configuration - pub(super) config: ClientConfig, - - /// Phase transition history - pub(super) phase_history: Vec<PhaseTransition>, - - /// Start time of the entire sync process - pub(super) sync_start_time: Option<Instant>, - - /// Timeout duration for each phase - pub(super) phase_timeout: Duration, - - /// Maximum retries per phase before giving up - pub(super) max_phase_retries: u32, - - /// Current retry count for the active phase - pub(super) current_phase_retries: u32, - - /// Optional wallet reference for filter checking - pub(super) wallet: std::sync::Arc<tokio::sync::RwLock<W>>, -} - -impl<S: StorageManager, N: NetworkManager, W: WalletInterface> SyncManager<S, N, W> { - /// Create a new sequential sync manager - pub fn new( - config: &ClientConfig, - received_filter_heights: SharedFilterHeights, - wallet: Arc<RwLock<W>>, - chain_state: Arc<RwLock<crate::types::ChainState>>, - ) -> SyncResult<Self> { - // Create reorg config with sensible defaults - let reorg_config = ReorgConfig::default(); - - Ok(Self { - current_phase: SyncPhase::Idle, - transition_manager: TransitionManager::new(config), - header_sync: HeaderSyncManager::new(config, reorg_config, chain_state).map_err( - |e| SyncError::InvalidState(format!("Failed to create header sync manager: {}", e)), - )?, - filter_sync: FilterSyncManager::new(config, received_filter_heights), - masternode_sync: MasternodeSyncManager::new(config), - config: config.clone(), - phase_history: Vec::new(), - sync_start_time: None, - phase_timeout: Duration::from_secs(60), // 1 minute default timeout per phase - max_phase_retries: 3, - current_phase_retries: 0, - wallet, - _phantom_s: std::marker::PhantomData, - _phantom_n: std::marker::PhantomData, - }) - } - - /// Load headers from storage into the sync managers - pub async fn load_headers_from_storage(&mut self, storage: &S) { - // Load headers into the header sync manager - self.header_sync.load_headers_from_storage(storage).await; - } -
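The pattern the doc comment above describes reduces to a few lines: a manager generic over its storage and network traits, with `PhantomData` pinning the parameters so every call monomorphizes. A miniature sketch with illustrative trait names (stand-ins, not the real `StorageManager`/`NetworkManager` definitions):

```rust
use std::marker::PhantomData;

// Illustrative stand-in traits for the storage and network abstractions.
trait Storage { fn tip_height(&self) -> u32; }
trait Network { fn peer_count(&self) -> usize; }

/// A manager generic over S and N. PhantomData records the parameters
/// on the type so instantiations are distinct and fully monomorphized,
/// letting the compiler inline across the trait boundaries.
struct Manager<S: Storage, N: Network> {
    _s: PhantomData<S>,
    _n: PhantomData<N>,
}

impl<S: Storage, N: Network> Manager<S, N> {
    fn new() -> Self {
        Self { _s: PhantomData, _n: PhantomData }
    }

    // Concrete S / N instances are borrowed per call, mirroring how the
    // sync managers take `storage: &mut S, network: &mut N` arguments.
    fn ready(&self, storage: &S, network: &N) -> bool {
        network.peer_count() > 0 && storage.tip_height() > 0
    }
}
```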
- /// Get the earliest wallet birth height hint for the configured network, if available. - pub async fn wallet_birth_height_hint(&self) -> CoreBlockHeight { - // Only acquire the wallet lock if we have a valid network mapping - let wallet_guard = self.wallet.read().await; - let result = wallet_guard.earliest_required_height().await; - drop(wallet_guard); - result - } - - /// Get the configured start height hint, if any. - pub fn config_start_height(&self) -> Option<u32> { - self.config.start_from_height - } - - /// Start the sequential sync process - pub async fn start_sync(&mut self, network: &mut N, storage: &mut S) -> SyncResult<bool> { - if self.current_phase.is_syncing() { - return Err(SyncError::SyncInProgress); - } - - tracing::info!("🚀 Starting sequential sync process"); - tracing::info!("📊 Current phase: {}", self.current_phase.name()); - self.sync_start_time = Some(Instant::now()); - - // Transition from Idle to first phase - self.transition_to_next_phase(storage, network, "Starting sync").await?; - - // The actual header request will be sent when we have peers - match &self.current_phase { - SyncPhase::DownloadingHeaders { - .. - } => { - // Just prepare the sync, don't execute yet - tracing::info!( - "📋 Sequential sync prepared, waiting for peers to send initial requests" - ); - // Prepare the header sync without sending requests - let base_hash = self.header_sync.prepare_sync(storage).await?; - tracing::debug!("Starting from base hash: {:?}", base_hash); - } - _ => { - // If we're not in headers phase, something is wrong - return Err(SyncError::InvalidState( - "Expected to be in DownloadingHeaders phase".to_string(), - )); - } - } - - Ok(true) - } - - /// Send initial sync requests (called after peers are connected) - pub async fn send_initial_requests( - &mut self, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - match &self.current_phase { - SyncPhase::DownloadingHeaders { - .. - } => { - tracing::info!("📡 Sending initial header requests for sequential sync"); - // If header sync is already prepared, just send the request - if self.header_sync.is_syncing() { - // Get current tip from storage to determine base hash - let base_hash = self.get_base_hash_from_storage(storage).await?; - - // Request headers starting from our current tip - self.header_sync.request_headers(network, base_hash, storage).await?; - } else { - // Otherwise start sync normally - self.header_sync.start_sync(network, storage).await?; - } - } - _ => { - tracing::warn!("send_initial_requests called but not in DownloadingHeaders phase"); - } - } - Ok(()) - } - - /// Reset any pending requests after restart. - pub fn reset_pending_requests(&mut self) { - // Reset all sync manager states - let _ = self.header_sync.reset_pending_requests(); - self.filter_sync.reset_pending_requests(); - // Masternode sync doesn't have pending requests to reset - - // Reset phase tracking - self.current_phase_retries = 0; - - tracing::debug!("Reset sequential sync manager pending requests"); - } - - /// Helper method to get base hash from storage - pub(super) async fn get_base_hash_from_storage( - &self, - storage: &S, - ) -> SyncResult<Option<BlockHash>> { - let current_tip_height = storage.get_tip_height().await; - - let base_hash = match current_tip_height { - None => None, - Some(height) => { - let tip_header = storage - .get_header(height) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip header: {}", e)))?; - tip_header.map(|h| h.block_hash()) - } - }; - - Ok(base_hash) - } - - /// Get current sync progress template.
- /// - /// **IMPORTANT**: This method returns a TEMPLATE ONLY. It does NOT query storage or network - /// for actual progress values. The returned `SyncProgress` struct contains: - /// - Accurate sync phase status flags based on the current phase - /// - PLACEHOLDER (zero/default) values for all heights, counts, and network data - /// - /// **Callers MUST populate the following fields with actual values from storage and network:** - /// - `header_height`: Should be queried from storage (e.g., `storage.get_tip_height()`) - /// - `filter_header_height`: Should be queried from storage (e.g., `storage.get_filter_tip_height()`) - /// - `masternode_height`: Should be queried from masternode state in storage - /// - `peer_count`: Should be queried from the network manager - /// - `filters_downloaded`: Should be calculated from storage - /// - `last_synced_filter_height`: Should be queried from storage - /// - /// # Examples - /// ```ignore - /// let mut progress = sync_manager.get_progress(); - /// progress.header_height = storage.get_tip_height().await?.unwrap_or(0); - /// progress.filter_header_height = storage.get_filter_tip_height().await?.unwrap_or(0); - /// progress.peer_count = network.peer_count() as u32; - /// // ... populate other fields as needed - /// ``` - pub fn get_progress(&self) -> SyncProgress { - // WARNING: This method returns a TEMPLATE with PLACEHOLDER values. - // Callers MUST populate header_height, filter_header_height, masternode_height, - // peer_count, filters_downloaded, and last_synced_filter_height with actual values - // from storage and network queries. - - // Create a basic progress report template - let _phase_progress = self.current_phase.progress(); - - SyncProgress { - header_height: 0, // PLACEHOLDER: Caller MUST query storage.get_tip_height() - filter_header_height: 0, // PLACEHOLDER: Caller MUST query storage.get_filter_tip_height() - masternode_height: 0, // PLACEHOLDER: Caller MUST query masternode state from storage - peer_count: 0, // PLACEHOLDER: Caller MUST query network.peer_count() - filters_downloaded: 0, // PLACEHOLDER: Caller MUST calculate from storage - last_synced_filter_height: None, // PLACEHOLDER: Caller MUST query from storage - sync_start: std::time::SystemTime::now(), - last_update: std::time::SystemTime::now(), - filter_sync_available: self.config.enable_filters, - } - } - - /// Check if sync is complete - pub fn is_synced(&self) -> bool { - matches!(self.current_phase, SyncPhase::FullySynced { .. }) - } - - /// Check if the current phase needs to be executed - /// This is true for phases that haven't been started yet - pub(super) fn current_phase_needs_execution(&self) -> bool { - match &self.current_phase { - SyncPhase::DownloadingCFHeaders { - .. - } => { - // Check if filter sync hasn't started yet (no progress time) - self.current_phase.last_progress_time().is_none() - } - SyncPhase::DownloadingFilters { - .. - } => { - // Check if filter download hasn't started yet - self.current_phase.last_progress_time().is_none() - } - _ => false, // Other phases are started by messages or initial sync - } - } - - /// Check if currently in the downloading blocks phase - pub fn is_in_downloading_blocks_phase(&self) -> bool { - matches!(self.current_phase, SyncPhase::DownloadingBlocks { .. }) - } - - /// Get current phase - pub fn current_phase(&self) -> &SyncPhase { - &self.current_phase - } - - /// Get a reference to the masternode list engine. - /// Returns None if masternode sync is not enabled in config. 
-    pub fn masternode_list_engine(
-        &self,
-    ) -> Option<&dashcore::sml::masternode_list_engine::MasternodeListEngine> {
-        self.masternode_sync.engine()
-    }
-
-    /// Update the chain state (used for checkpoint sync initialization)
-    pub fn update_chain_state_cache(&mut self, sync_base_height: u32) {
-        self.header_sync.update_cached_from_state_snapshot(sync_base_height);
-    }
-
-    /// Get reference to the masternode engine if available.
-    /// Returns None if masternodes are disabled or engine is not initialized.
-    pub fn get_masternode_engine(
-        &self,
-    ) -> Option<&dashcore::sml::masternode_list_engine::MasternodeListEngine> {
-        self.masternode_sync.engine()
-    }
-
-    /// Get a reference to the filter sync manager.
-    pub fn filter_sync(&self) -> &FilterSyncManager<S, N> {
-        &self.filter_sync
-    }
-
-    /// Get a mutable reference to the filter sync manager.
-    pub fn filter_sync_mut(&mut self) -> &mut FilterSyncManager<S, N> {
-        &mut self.filter_sync
-    }
-
-    /// Get the actual blockchain height from storage height, accounting for checkpoints
-    pub(super) async fn get_blockchain_height_from_storage(&self, storage: &S) -> u32 {
-        storage.get_tip_height().await.unwrap_or(0)
-    }
-}
diff --git a/dash-spv/src/sync/legacy/masternodes/embedded_data.rs b/dash-spv/src/sync/legacy/masternodes/embedded_data.rs
deleted file mode 100644
index 37ecebe85..000000000
--- a/dash-spv/src/sync/legacy/masternodes/embedded_data.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-//! Embedded masternode list diffs for faster initial sync.
-//!
-//! This module contains pre-computed MNListDiff data embedded at compile time
-//! to speed up initial synchronization by starting from a known good state.
-
-use dashcore::{consensus::deserialize, network::message_sml::MnListDiff, Network};
-
-// Embed the mainnet MNListDiff from height 0 to 2227096
-const MAINNET_MNLIST_DIFF_0_2227096: &[u8] =
-    include_bytes!("../../../../../dash/artifacts/mn_list_diff_0_2227096.bin");
-
-// Embed the testnet MNListDiff from height 0 to 1296600
-const TESTNET_MNLIST_DIFF_0_1296600: &[u8] =
-    include_bytes!("../../../../../dash/artifacts/mn_list_diff_testnet_0_1296600.bin");
-
-/// Information about an embedded MNListDiff
-pub struct EmbeddedDiff {
-    pub diff: MnListDiff,
-    pub base_height: u32,
-    pub target_height: u32,
-}
-
-/// Get the embedded MNListDiff for a specific network, if available.
-pub fn get_embedded_diff(network: Network) -> Option<EmbeddedDiff> {
-    match network {
-        Network::Dash => {
-            let bytes = MAINNET_MNLIST_DIFF_0_2227096;
-            match deserialize::<MnListDiff>(bytes) {
-                Ok(diff) => Some(EmbeddedDiff {
-                    diff,
-                    base_height: 0,
-                    target_height: 2227096,
-                }),
-                Err(e) => {
-                    tracing::warn!("Failed to deserialize embedded mainnet MNListDiff: {}", e);
-                    None
-                }
-            }
-        }
-        Network::Testnet => {
-            let bytes = TESTNET_MNLIST_DIFF_0_1296600;
-            match deserialize::<MnListDiff>(bytes) {
-                Ok(diff) => Some(EmbeddedDiff {
-                    diff,
-                    base_height: 0,
-                    target_height: 1296600,
-                }),
-                Err(e) => {
-                    tracing::warn!("Failed to deserialize embedded testnet MNListDiff: {}", e);
-                    None
-                }
-            }
-        }
-        _ => {
-            // No embedded data for other networks (regtest, devnet, etc.)
-            None
-        }
-    }
-}
diff --git a/dash-spv/src/sync/legacy/masternodes/manager.rs b/dash-spv/src/sync/legacy/masternodes/manager.rs
deleted file mode 100644
index 38b35918c..000000000
--- a/dash-spv/src/sync/legacy/masternodes/manager.rs
+++ /dev/null
@@ -1,925 +0,0 @@
-//! Simplified masternode synchronization based on dash-evo-tool approach.
-//!
-//! This implementation directly follows the fetch_rotated_quorum_info pattern
-//! from dash-evo-tool for simple, reliable QRInfo sync.
-
-use dashcore::{
-    network::constants::NetworkExt,
-    network::message::NetworkMessage,
-    network::message_qrinfo::{GetQRInfo, QRInfo},
-    network::message_sml::MnListDiff,
-    sml::masternode_list_engine::MasternodeListEngine,
-    BlockHash, QuorumHash,
-};
-use std::collections::HashMap;
-use std::time::{Duration, Instant};
-
-use crate::client::ClientConfig;
-use crate::error::{SyncError, SyncResult};
-use crate::network::NetworkManager;
-use crate::storage::StorageManager;
-
-/// Simplified masternode synchronization following dash-evo-tool pattern.
-pub struct MasternodeSyncManager<S, N> {
-    _phantom_s: std::marker::PhantomData<S>,
-    _phantom_n: std::marker::PhantomData<N>,
-    config: ClientConfig,
-    engine: Option<MasternodeListEngine>,
-
-    // Simple caches matching dash-evo-tool pattern
-    mnlist_diffs: HashMap<(u32, u32), MnListDiff>,
-    qr_infos: HashMap<BlockHash, QRInfo>,
-
-    // Track last successful QRInfo block for progressive sync
-    last_qrinfo_block_hash: Option<BlockHash>,
-
-    // Simple error handling
-    error: Option<String>,
-
-    // Sync state
-    sync_in_progress: bool,
-    last_sync_time: Option<Instant>,
-
-    // Track pending MnListDiff requests (for quorum validation)
-    // This ensures we don't transition to the next phase before receiving all responses
-    pending_mnlistdiff_requests: usize,
-
-    // Track when we started waiting for MnListDiff responses (for timeout detection)
-    mnlistdiff_wait_start: Option<Instant>,
-
-    // Track retry attempts for MnListDiff requests
-    mnlistdiff_retry_count: u8,
-}
-
-impl<S: StorageManager, N: NetworkManager> MasternodeSyncManager<S, N> {
-    /// Create a new masternode sync manager.
-    pub fn new(config: &ClientConfig) -> Self {
-        let (engine, mnlist_diffs) = if config.enable_masternodes {
-            // Try to load embedded MNListDiff data for faster initial sync
-            if let Some(embedded) = super::embedded_data::get_embedded_diff(config.network) {
-                tracing::info!(
-                    "📦 Using embedded MNListDiff for {} - starting from height {}",
-                    config.network,
-                    embedded.target_height
-                );
-
-                // Initialize engine with the embedded diff
-                match MasternodeListEngine::initialize_with_diff_to_height(
-                    embedded.diff.clone(),
-                    embedded.target_height,
-                    config.network,
-                ) {
-                    Ok(engine) => {
-                        // Store the embedded diff in our cache
-                        let mut diffs = HashMap::new();
-                        diffs.insert((embedded.base_height, embedded.target_height), embedded.diff);
-                        (Some(engine), diffs)
-                    }
-                    Err(e) => {
-                        tracing::warn!(
-                            "Failed to initialize engine with embedded diff: {}. Falling back to default.",
-                            e
-                        );
-                        let mut engine = MasternodeListEngine::default_for_network(config.network);
-                        // Feed genesis block hash at height 0
-                        if let Some(genesis_hash) = config.network.known_genesis_block_hash() {
-                            engine.feed_block_height(0, genesis_hash);
-                        }
-                        (Some(engine), HashMap::new())
-                    }
-                }
-            } else {
-                tracing::info!(
-                    "No embedded MNListDiff available for {} - starting from genesis",
-                    config.network
-                );
-                let mut engine = MasternodeListEngine::default_for_network(config.network);
-                // Feed genesis block hash at height 0
-                if let Some(genesis_hash) = config.network.known_genesis_block_hash() {
-                    engine.feed_block_height(0, genesis_hash);
-                }
-                (Some(engine), HashMap::new())
-            }
-        } else {
-            (None, HashMap::new())
-        };
-
-        Self {
-            config: config.clone(),
-            engine,
-            mnlist_diffs,
-            qr_infos: HashMap::new(),
-            last_qrinfo_block_hash: None,
-            error: None,
-            sync_in_progress: false,
-            last_sync_time: None,
-            pending_mnlistdiff_requests: 0,
-            mnlistdiff_wait_start: None,
-            mnlistdiff_retry_count: 0,
-            _phantom_s: std::marker::PhantomData,
-            _phantom_n: std::marker::PhantomData,
-        }
-    }
-
-    /// Request QRInfo - simplified non-blocking implementation
-    pub async fn request_qrinfo(
-        &mut self,
-        network: &mut N,
-        base_block_hash: BlockHash,
-        block_hash: BlockHash,
-    ) -> Result<(), String> {
-        // Step 1: Collect known block hashes from existing diffs (dash-evo-tool pattern)
-        let mut known_block_hashes: Vec<_> =
-            self.mnlist_diffs.values().map(|mn_list_diff| mn_list_diff.block_hash).collect();
-        known_block_hashes.push(base_block_hash);
-        tracing::info!(
-            "Requesting QRInfo with known_block_hashes: {}, block_request_hash: {}",
-            known_block_hashes.iter().map(|bh| bh.to_string()).collect::<Vec<_>>().join(", "),
-            block_hash
-        );
-
-        // Step 2: Send P2P request (non-blocking)
-        if let Err(e) = self.request_qr_info(network, known_block_hashes, block_hash).await {
-            let error_msg = format!("Failed to send QRInfo request: {}", e);
-            self.error = Some(error_msg.clone());
-            return Err(error_msg);
-        }
-
-        tracing::info!(
-            "📤 QRInfo request sent successfully, processing will happen when message arrives"
-        );
-        Ok(())
-    }
-
-    /// Insert masternode list diff - direct translation of dash-evo-tool implementation
-    async fn insert_mn_list_diff(&mut self, mn_list_diff: &MnListDiff, storage: &S) {
-        let base_block_hash = mn_list_diff.base_block_hash;
-        let base_height = match self.get_height_for_hash(&base_block_hash, storage).await {
-            Ok(height) => height,
-            Err(e) => {
-                let error_msg =
-                    format!("Failed to get height for base block hash {}: {}", base_block_hash, e);
-                tracing::error!("❌ MnListDiff insertion failed: {}", error_msg);
-                self.error = Some(error_msg);
-                return;
-            }
-        };
-
-        let block_hash = mn_list_diff.block_hash;
-        let height = match self.get_height_for_hash(&block_hash, storage).await {
-            Ok(height) => height,
-            Err(e) => {
-                let error_msg =
-                    format!("Failed to get height for block hash {}: {}", block_hash, e);
-                tracing::error!("❌ MnListDiff insertion failed: {}", error_msg);
-                self.error = Some(error_msg);
-                return;
-            }
-        };
-
-        self.mnlist_diffs.insert((base_height, height), mn_list_diff.clone());
-
-        tracing::debug!(
-            "✅ Inserted masternode list diff: base_height={}, height={}, base_hash={}, hash={}, new_masternodes={}, deleted_masternodes={}",
-            base_height, height, base_block_hash, block_hash,
-            mn_list_diff.new_masternodes.len(),
-            mn_list_diff.deleted_masternodes.len()
-        );
-    }
-
-    /// Helper to get height for block hash using storage (consistent with
dynamic callback) - async fn get_height_for_hash( - &self, - block_hash: &BlockHash, - storage: &S, - ) -> Result { - // Special case: Handle genesis block which isn't stored when syncing from checkpoints - if let Some(genesis_hash) = self.config.network.known_genesis_block_hash() { - if *block_hash == genesis_hash { - return Ok(0); - } - } - - // Regular storage lookup for all other blocks - match storage.get_header_height_by_hash(block_hash).await { - Ok(Some(height)) => Ok(height), - Ok(None) => Err(format!("Height not found for block hash: {}", block_hash)), - Err(e) => { - Err(format!("Storage error looking up height for block hash {}: {}", block_hash, e)) - } - } - } - - /// Make QRInfo P2P request (simplified non-blocking) - async fn request_qr_info( - &mut self, - network: &mut N, - known_block_hashes: Vec, - block_request_hash: BlockHash, - ) -> Result<(), String> { - let get_qr_info_msg = NetworkMessage::GetQRInfo(GetQRInfo { - base_block_hashes: known_block_hashes, - block_request_hash, - extra_share: true, - }); - - // Send request (no state coordination needed - message handler will process response) - network - .send_message(get_qr_info_msg) - .await - .map_err(|e| format!("Failed to send QRInfo request: {}", e))?; - - tracing::info!("📤 Sent QRInfo request (unified processing)"); - Ok(()) - } - - /// Log detailed QRInfo statistics - fn log_qrinfo_details(&self, qr_info: &QRInfo, prefix: &str) { - let h4c_count = if qr_info.quorum_snapshot_and_mn_list_diff_at_h_minus_4c.is_some() { - 1 - } else { - 0 - }; - let core_diff_count = 5 + h4c_count; // tip, h, h-c, h-2c, h-3c, plus optional h-4c - - tracing::info!( - "{} with {} core diffs, {} additional diffs, {} additional snapshots", - prefix, - core_diff_count, - qr_info.mn_list_diff_list.len(), - qr_info.quorum_snapshot_list.len() - ); - - tracing::debug!( - "📋 QRInfo core data: tip={}, h={}, h-c={}, h-2c={}, h-3c={}, h-4c={}, commitments={}", - qr_info.mn_list_diff_tip.block_hash, - qr_info.mn_list_diff_h.block_hash, - qr_info.mn_list_diff_at_h_minus_c.block_hash, - qr_info.mn_list_diff_at_h_minus_2c.block_hash, - qr_info.mn_list_diff_at_h_minus_3c.block_hash, - qr_info - .quorum_snapshot_and_mn_list_diff_at_h_minus_4c - .as_ref() - .map(|(_, diff)| diff.block_hash.to_string()) - .unwrap_or_else(|| "None".to_string()), - qr_info.last_commitment_per_index.len() - ); - } - - /// Feed QRInfo block heights to the masternode engine (dash-evo-tool pattern) - async fn feed_qrinfo_block_heights( - &mut self, - qr_info: &QRInfo, - storage: &mut S, - ) -> Result<(), String> { - if let Some(engine) = &mut self.engine { - tracing::debug!("🔗 Feeding QRInfo block heights to masternode engine"); - - // Collect all block hashes from QRInfo MnListDiffs - let mut block_hashes = vec![ - qr_info.mn_list_diff_tip.block_hash, - qr_info.mn_list_diff_h.block_hash, - qr_info.mn_list_diff_at_h_minus_c.block_hash, - qr_info.mn_list_diff_at_h_minus_2c.block_hash, - qr_info.mn_list_diff_at_h_minus_3c.block_hash, - ]; - - if let Some((_, diff)) = &qr_info.quorum_snapshot_and_mn_list_diff_at_h_minus_4c { - block_hashes.push(diff.block_hash); - } - - for diff in &qr_info.mn_list_diff_list { - block_hashes.push(diff.block_hash); - } - - // Also collect base block hashes - block_hashes.push(qr_info.mn_list_diff_tip.base_block_hash); - block_hashes.push(qr_info.mn_list_diff_h.base_block_hash); - block_hashes.push(qr_info.mn_list_diff_at_h_minus_c.base_block_hash); - block_hashes.push(qr_info.mn_list_diff_at_h_minus_2c.base_block_hash); - 
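-            // Each diff is cached under a (base_height, target_height) key, so the
-            // engine must be able to resolve base hashes to heights as well.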
block_hashes.push(qr_info.mn_list_diff_at_h_minus_3c.base_block_hash); - - if let Some((_, diff)) = &qr_info.quorum_snapshot_and_mn_list_diff_at_h_minus_4c { - block_hashes.push(diff.base_block_hash); - } - - for diff in &qr_info.mn_list_diff_list { - block_hashes.push(diff.base_block_hash); - } - - // Remove duplicates - block_hashes.sort(); - block_hashes.dedup(); - - // Feed heights for all block hashes - let mut fed_count = 0; - for block_hash in block_hashes { - if let Ok(Some(height)) = storage.get_header_height_by_hash(&block_hash).await { - engine.feed_block_height(height, block_hash); - fed_count += 1; - tracing::debug!("🔗 Fed height {} for block {}", height, block_hash); - } else { - tracing::warn!( - "⚠️ Could not find height for block hash {} in storage", - block_hash - ); - } - } - - tracing::info!("🔗 Fed {} block heights to masternode engine", fed_count); - Ok(()) - } else { - Err("Masternode engine not initialized".to_string()) - } - } - - /// Process quorum snapshots from QRInfo (basic implementation) - fn process_quorum_snapshots(&mut self, qr_info: &QRInfo) { - tracing::debug!("🏛️ Processing quorum snapshots from QRInfo"); - - // Process core quorum snapshots - self.process_single_quorum_snapshot(&qr_info.quorum_snapshot_at_h_minus_c, "h-c"); - self.process_single_quorum_snapshot(&qr_info.quorum_snapshot_at_h_minus_2c, "h-2c"); - self.process_single_quorum_snapshot(&qr_info.quorum_snapshot_at_h_minus_3c, "h-3c"); - - // Process optional h-4c snapshot - if let Some((snapshot, _)) = &qr_info.quorum_snapshot_and_mn_list_diff_at_h_minus_4c { - self.process_single_quorum_snapshot(snapshot, "h-4c"); - } - - // Process additional snapshots - for (i, snapshot) in qr_info.quorum_snapshot_list.iter().enumerate() { - self.process_single_quorum_snapshot(snapshot, &format!("additional-{}", i)); - } - - tracing::debug!("🏛️ Quorum snapshot processing completed"); - } - - /// Process a single quorum snapshot (basic logging implementation) - fn process_single_quorum_snapshot( - &mut self, - snapshot: &dashcore::network::message_qrinfo::QuorumSnapshot, - context: &str, - ) { - tracing::debug!( - "🏛️ Processing quorum snapshot ({}): active_quorum_members={}, skip_list_mode={}, skip_list={}", - context, - snapshot.active_quorum_members.len(), - snapshot.skip_list_mode, - snapshot.skip_list.len() - ); - - // TODO: Implement actual quorum snapshot processing - // For now, we just log the basic information - // In a full implementation, this would: - // 1. Validate the quorum snapshot structure - // 2. Update the quorum state in the masternode engine - // 3. Cache the snapshot for future validation - // 4. Handle skip list updates - } - - /// Start masternode synchronization - pub async fn start_sync(&mut self, network: &mut N, storage: &mut S) -> SyncResult { - if self.sync_in_progress { - return Err(SyncError::SyncInProgress); - } - - self.sync_in_progress = true; - self.error = None; - self.mnlistdiff_retry_count = 0; // Reset retry counter for new sync - - // Get current chain tip - let tip_height = storage.get_tip_height().await.unwrap_or(0); - - let tip_header = storage - .get_header(tip_height) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip header: {}", e)))? 
- .ok_or_else(|| SyncError::Storage("Tip header not found".to_string()))?; - let tip_hash = tip_header.block_hash(); - - // Determine base block hash using dash-evo-tool pattern: - // - First QRInfo request: use genesis block hash - // - Subsequent requests: use the last successfully processed QRInfo block - let base_hash = if let Some(last_qrinfo_hash) = self.last_qrinfo_block_hash { - // Use the last successfully processed QRInfo block - tracing::debug!("Using last successful QRInfo block as base: {}", last_qrinfo_hash); - last_qrinfo_hash - } else { - // First time - use genesis block - let genesis_hash = - self.config.network.known_genesis_block_hash().ok_or_else(|| { - SyncError::InvalidState("Genesis hash not available".to_string()) - })?; - tracing::debug!("Using genesis block as base: {}", genesis_hash); - genesis_hash - }; - - // Request QRInfo using simplified non-blocking approach - match self.request_qrinfo(network, base_hash, tip_hash).await { - Ok(()) => { - tracing::info!("🚀 QRInfo request initiated successfully, sync will complete when response arrives"); - // Keep sync_in_progress = true, will be set to false in handle_qrinfo_message - Ok(true) - } - Err(error_msg) => { - tracing::error!("❌ Failed to initiate QRInfo request: {}", error_msg); - self.sync_in_progress = false; - Err(SyncError::Validation(error_msg)) - } - } - } - - /// Handle incoming MnListDiff message - pub async fn handle_mnlistdiff_message( - &mut self, - diff: &MnListDiff, - storage: &mut S, - _network: &mut N, - ) -> SyncResult { - self.insert_mn_list_diff(diff, storage).await; - - // Decrement pending request counter if we were expecting this response - if self.pending_mnlistdiff_requests > 0 { - self.pending_mnlistdiff_requests -= 1; - tracing::info!( - "📥 Received MnListDiff response ({} pending remaining)", - self.pending_mnlistdiff_requests - ); - - // If this was the last pending request, mark sync as complete - if self.pending_mnlistdiff_requests == 0 && self.sync_in_progress { - tracing::info!( - "✅ All MnListDiff requests completed, marking masternode sync as done" - ); - self.sync_in_progress = false; - self.last_sync_time = Some(Instant::now()); - self.mnlistdiff_wait_start = None; // Clear wait timer - - // Persist masternode state so phase manager can detect completion - match storage.get_tip_height().await { - Some(tip_height) => { - let state = crate::storage::MasternodeState { - last_height: tip_height, - engine_state: Vec::new(), - last_update: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(0), - }; - if let Err(e) = storage.store_masternode_state(&state).await { - tracing::warn!("⚠️ Failed to store masternode state: {}", e); - } - } - None => { - tracing::warn!( - "⚠️ Storage returned no tip height when persisting masternode state" - ); - } - } - } - } - - Ok(false) // Not used for sync completion in simple approach - } - - /// Check for sync timeout - pub async fn check_sync_timeout(&mut self, storage: &mut S, network: &mut N) -> SyncResult<()> { - // Check if we're waiting for MnListDiff responses and have timed out - if self.pending_mnlistdiff_requests > 0 { - if let Some(wait_start) = self.mnlistdiff_wait_start { - let timeout_duration = Duration::from_secs(15); - - if wait_start.elapsed() > timeout_duration { - // Timeout hit - if self.mnlistdiff_retry_count < 1 { - // First timeout - retry by restarting the QRInfo request - tracing::warn!( - "⏰ Timeout waiting for {} MnListDiff responses after {:?}, retrying QRInfo 
request...", - self.pending_mnlistdiff_requests, - wait_start.elapsed() - ); - - self.mnlistdiff_retry_count += 1; - self.pending_mnlistdiff_requests = 0; - self.mnlistdiff_wait_start = None; - - // Restart by re-initiating the sync - // Get current chain tip for the retry - let tip_height = storage.get_tip_height().await.unwrap_or(0); - - let tip_header = storage - .get_header(tip_height) - .await - .map_err(|e| { - SyncError::Storage(format!("Failed to get tip header: {}", e)) - })? - .ok_or_else(|| { - SyncError::Storage("Tip header not found".to_string()) - })?; - let tip_hash = tip_header.block_hash(); - - let base_hash = if let Some(last_qrinfo_hash) = self.last_qrinfo_block_hash - { - last_qrinfo_hash - } else { - self.config.network.known_genesis_block_hash().ok_or_else(|| { - SyncError::InvalidState("Genesis hash not available".to_string()) - })? - }; - - // Re-send the QRInfo request - match self.request_qrinfo(network, base_hash, tip_hash).await { - Ok(()) => { - tracing::info!("🔄 QRInfo retry request sent successfully"); - } - Err(e) => { - tracing::error!("❌ Failed to send retry QRInfo request: {}", e); - self.error = Some(format!("Failed to retry QRInfo: {}", e)); - self.sync_in_progress = false; - } - } - } else { - // Already retried once - give up and force completion - tracing::error!( - "❌ Failed to receive {} MnListDiff responses after {:?} and {} retry attempt(s)", - self.pending_mnlistdiff_requests, - wait_start.elapsed(), - self.mnlistdiff_retry_count - ); - tracing::warn!( - "⚠️ Proceeding without complete masternode data - quorum validation may be incomplete" - ); - - // Force completion to unblock sync - self.pending_mnlistdiff_requests = 0; - self.mnlistdiff_wait_start = None; - self.sync_in_progress = false; - self.error = Some("MnListDiff requests timed out after retry".to_string()); - - // Still persist what we have - if let Some(tip_height) = storage.get_tip_height().await { - let state = crate::storage::MasternodeState { - last_height: tip_height, - engine_state: Vec::new(), - last_update: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(0), - }; - if let Err(e) = storage.store_masternode_state(&state).await { - tracing::warn!("⚠️ Failed to store masternode state: {}", e); - } - } - } - } - } - } - - Ok(()) - } - - /// Get engine reference - pub fn engine(&self) -> Option<&MasternodeListEngine> { - self.engine.as_ref() - } - - /// Check if sync is in progress - pub fn is_syncing(&self) -> bool { - self.sync_in_progress - } - - /// Get last error - pub fn last_error(&self) -> Option<&str> { - self.error.as_deref() - } - - /// Handle incoming QRInfo message (unified processing with block height feeding) - pub async fn handle_qrinfo_message( - &mut self, - qr_info: QRInfo, - storage: &mut S, - network: &mut N, - ) { - self.log_qrinfo_details(&qr_info, "📋 Masternode sync processing QRInfo (unified path)"); - - // Feed block heights to engine before processing (critical for hash lookups) - if let Err(e) = self.feed_qrinfo_block_heights(&qr_info, storage).await { - tracing::error!("❌ Failed to feed QRInfo block heights: {}", e); - self.error = Some(e); - return; - } - - // Insert all masternode list diffs from QRInfo (dash-evo-tool pattern) - self.insert_mn_list_diff(&qr_info.mn_list_diff_tip, storage).await; - self.insert_mn_list_diff(&qr_info.mn_list_diff_h, storage).await; - self.insert_mn_list_diff(&qr_info.mn_list_diff_at_h_minus_c, storage).await; - 
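-        // The remaining core diffs (h-2c, h-3c, and the optional h-4c diff) plus
-        // the additional diff list below go through this same insertion path.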
self.insert_mn_list_diff(&qr_info.mn_list_diff_at_h_minus_2c, storage).await; - self.insert_mn_list_diff(&qr_info.mn_list_diff_at_h_minus_3c, storage).await; - - if let Some((_, mn_list_diff_at_h_minus_4c)) = - &qr_info.quorum_snapshot_and_mn_list_diff_at_h_minus_4c - { - self.insert_mn_list_diff(mn_list_diff_at_h_minus_4c, storage).await; - } - - for diff in &qr_info.mn_list_diff_list { - self.insert_mn_list_diff(diff, storage).await; - } - - // Process quorum snapshots (comprehensive processing) - self.process_quorum_snapshots(&qr_info); - - // Feed QRInfo to engine and get additional MnListDiffs needed for quorum validation - // This is the critical step that dash-evo-tool performs after initial QRInfo processing - if let Err(e) = self.feed_qrinfo_and_get_additional_diffs(&qr_info, storage, network).await - { - tracing::error!("❌ Failed to process QRInfo follow-up diffs: {}", e); - self.error = Some(e); - return; - } - - // Cache the QRInfo using the requested block hash as key - let block_hash = qr_info.mn_list_diff_h.block_hash; - self.qr_infos.insert(block_hash, qr_info); - - // Update last successful QRInfo block for progressive sync - self.last_qrinfo_block_hash = Some(block_hash); - - // Check if we need to wait for MnListDiff responses - if self.pending_mnlistdiff_requests == 0 { - // No additional requests were sent (edge case: no quorum validation needed) - // Mark sync as complete immediately - tracing::info!("✅ QRInfo processing completed with no additional requests, masternode sync phase is done"); - self.sync_in_progress = false; - self.last_sync_time = Some(Instant::now()); - self.mnlistdiff_wait_start = None; // Ensure wait timer is cleared - - // Persist masternode state so phase manager can detect completion - match storage.get_tip_height().await { - Some(tip_height) => { - let state = crate::storage::MasternodeState { - last_height: tip_height, - engine_state: Vec::new(), - last_update: std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(0), - }; - if let Err(e) = storage.store_masternode_state(&state).await { - tracing::warn!("⚠️ Failed to store masternode state: {}", e); - } - } - None => { - tracing::warn!( - "⚠️ Storage returned no tip height when persisting masternode state" - ); - } - } - } else { - tracing::info!( - "⏳ Waiting for {} pending MnListDiff responses before completing masternode sync", - self.pending_mnlistdiff_requests - ); - // Keep sync_in_progress = true so we don't transition to the next phase yet - // Completion and state persistence will happen in handle_mnlistdiff_message - } - - tracing::info!("✅ QRInfo processing completed successfully (unified path)"); - } - - /// Feed QRInfo to engine and fetch additional MnListDiffs for quorum validation - /// This implements the critical follow-up step from dash-evo-tool's feed_qr_info_and_get_dmls() - async fn feed_qrinfo_and_get_additional_diffs( - &mut self, - qr_info: &QRInfo, - storage: &mut S, - network: &mut N, - ) -> Result<(), String> { - tracing::info!( - "🔗 Feeding QRInfo to engine and getting additional diffs for quorum validation" - ); - - // Step 1: Feed QRInfo to masternode list engine with dynamic on-demand height callback - let (quorum_hashes, _rotating_quorum_hashes) = if let Some(engine) = &mut self.engine { - // Create dynamic callback that fetches heights on-demand from storage - let height_lookup = |block_hash: &BlockHash| -> Result< - u32, - dashcore::sml::quorum_validation_error::ClientDataRetrievalError, - > { - // Use 
block_in_place to bridge async storage call to sync callback - tokio::task::block_in_place(|| { - tokio::runtime::Handle::current().block_on(async { - storage.get_header_height_by_hash(block_hash) - .await - .map_err(|_| dashcore::sml::quorum_validation_error::ClientDataRetrievalError::RequiredBlockNotPresent(*block_hash))? - .ok_or(dashcore::sml::quorum_validation_error::ClientDataRetrievalError::RequiredBlockNotPresent(*block_hash)) - }) - }) - }; - - match engine.feed_qr_info(qr_info.clone(), true, true, Some(height_lookup)) { - Ok(()) => { - tracing::info!("✅ Successfully fed QRInfo to masternode list engine"); - } - Err(e) => { - let error_msg = format!("Failed to feed QRInfo to engine: {}", e); - tracing::error!("❌ {}", error_msg); - return Err(error_msg); - } - } - - // Get quorum hashes for validation - let quorum_hashes = - engine.latest_masternode_list_non_rotating_quorum_hashes(&[], false); - let rotating_quorum_hashes = engine.latest_masternode_list_rotating_quorum_hashes(&[]); - - tracing::info!( - "🏛️ Retrieved {} non-rotating quorum hashes for validation", - quorum_hashes.len() - ); - tracing::info!("🔄 Retrieved {} rotating quorum hashes", rotating_quorum_hashes.len()); - - (quorum_hashes, rotating_quorum_hashes) - } else { - return Err("Masternode engine not initialized".to_string()); - }; - - // Step 3: Fetch additional MnListDiffs for quorum validation (avoiding borrow conflicts) - if let Err(e) = self.fetch_diffs_with_hashes(&quorum_hashes, storage, network).await { - let error_msg = - format!("Failed to fetch additional diffs for quorum validation: {}", e); - tracing::error!("❌ {}", error_msg); - return Err(error_msg); - } - - // Step 4: Verify quorums - if let Some(engine) = &mut self.engine { - match engine.verify_non_rotating_masternode_list_quorums(0, &[]) { - Ok(()) => { - tracing::info!("✅ Non-rotating quorum verification completed successfully"); - } - Err(e) => { - tracing::warn!("⚠️ Non-rotating quorum verification failed: {}", e); - // Don't fail completely - this might be expected in some cases - } - } - } - - Ok(()) - } - - /// Fetch additional MnListDiffs for quorum validation (dash-evo-tool pattern) - /// This implements the fetch_diffs_with_hashes logic from dash-evo-tool - async fn fetch_diffs_with_hashes( - &mut self, - quorum_hashes: &std::collections::BTreeSet, - storage: &mut S, - network: &mut N, - ) -> Result<(), String> { - use dashcore::network::message::NetworkMessage; - use dashcore::network::message_sml::GetMnListDiff; - - tracing::info!( - "🔍 Fetching {} additional MnListDiffs for quorum validation", - quorum_hashes.len() - ); - - // Track how many requests we're about to send - let mut requests_sent = 0; - - for quorum_hash in quorum_hashes.iter() { - tracing::info!("🔍 Processing quorum hash: {}", quorum_hash); - - // Get the quorum hash as BlockHash for height lookup (QuorumHash and BlockHash are the same type) - let quorum_block_hash = *quorum_hash; - // Look up the height for this quorum hash - let quorum_height = match storage.get_header_height_by_hash(&quorum_block_hash).await { - Ok(Some(height)) => height, - Ok(None) => { - tracing::warn!( - "⚠️ Height not found for quorum hash {} in storage, skipping", - quorum_block_hash - ); - continue; - } - Err(e) => { - tracing::warn!( - "⚠️ Failed to get height for quorum hash {}: {}, skipping", - quorum_block_hash, - e - ); - continue; - } - }; - - // Calculate validation height (height - 8, following dash-evo-tool pattern) - let validation_height = if quorum_height >= 8 { - quorum_height - 8 
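-                // The 8-block offset mirrors dash-evo-tool: Dash quorum membership is
-                // derived from the masternode list as of quorumHeight - 8.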
- } else { - tracing::warn!( - "⚠️ Quorum height {} is too low for validation (< 8), using height 0", - quorum_height - ); - 0 - }; - - tracing::info!( - "📏 Quorum at height {}, validation height: {}", - quorum_height, - validation_height - ); - - // Use blockchain heights directly with storage API - let storage_validation_height = validation_height; - let storage_quorum_height = quorum_height; - - tracing::debug!("🔄 Height conversion: blockchain validation_height={} -> storage_height={}, blockchain quorum_height={} -> storage_height={}", - validation_height, storage_validation_height, quorum_height, storage_quorum_height); - - // Get base block hash (blockchain height) - let base_header = match storage.get_header(storage_validation_height).await { - Ok(Some(header)) => header, - Ok(None) => { - tracing::warn!( - "⚠️ Base header not found at storage height {} (blockchain height {}), skipping", - storage_validation_height, validation_height); - continue; - } - Err(e) => { - tracing::warn!( - "⚠️ Failed to get base header at storage height {} (blockchain height {}): {}, skipping", - storage_validation_height, validation_height, e); - continue; - } - }; - let base_block_hash = base_header.block_hash(); - - // Get target block hash (blockchain height) - let target_header = match storage.get_header(storage_quorum_height).await { - Ok(Some(header)) => header, - Ok(None) => { - tracing::warn!( - "⚠️ Target header not found at storage height {} (blockchain height {}), skipping", - storage_quorum_height, quorum_height); - continue; - } - Err(e) => { - tracing::warn!( - "⚠️ Failed to get target header at storage height {} (blockchain height {}): {}, skipping", - storage_quorum_height, quorum_height, e); - continue; - } - }; - let target_block_hash = target_header.block_hash(); - - // Create GetMnListDiff request - let get_mnlist_diff = GetMnListDiff { - base_block_hash, - block_hash: target_block_hash, - }; - let network_message = NetworkMessage::GetMnListD(get_mnlist_diff); - - // Send the request (this matches dash-evo-tool's pattern of sending individual requests) - tracing::info!("📤 Requesting MnListDiff: base_height={}, target_height={}, base_hash={}, target_hash={}", - validation_height, quorum_height, base_block_hash, target_block_hash); - - if let Err(e) = network.send_message(network_message).await { - tracing::error!( - "❌ Failed to send MnListDiff request for quorum hash {}: {}", - quorum_hash, - e - ); - // Continue with other quorums instead of failing completely - continue; - } - - // Track that we sent a request - requests_sent += 1; - - tracing::info!( - "✅ Sent MnListDiff request for quorum hash {} (base: {} -> target: {})", - quorum_hash, - validation_height, - quorum_height - ); - } - - // Update the pending request counter - self.pending_mnlistdiff_requests += requests_sent; - - // Start tracking wait time if we sent any requests - if requests_sent > 0 { - self.mnlistdiff_wait_start = Some(Instant::now()); - tracing::info!( - "📋 Completed sending {} MnListDiff requests for quorum validation (total pending: {}), started timeout tracking", - requests_sent, - self.pending_mnlistdiff_requests - ); - } else { - tracing::info!("📋 No MnListDiff requests sent (all quorums already have data)"); - } - - Ok(()) - } -} diff --git a/dash-spv/src/sync/legacy/masternodes/mod.rs b/dash-spv/src/sync/legacy/masternodes/mod.rs deleted file mode 100644 index faef8d903..000000000 --- a/dash-spv/src/sync/legacy/masternodes/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! 
Masternode synchronization and embedded data. - -pub mod embedded_data; -mod manager; - -pub use manager::MasternodeSyncManager; diff --git a/dash-spv/src/sync/legacy/message_handlers.rs b/dash-spv/src/sync/legacy/message_handlers.rs deleted file mode 100644 index d015dcb89..000000000 --- a/dash-spv/src/sync/legacy/message_handlers.rs +++ /dev/null @@ -1,707 +0,0 @@ -//! Message handlers for synchronization phases. - -use dashcore::bip158::BlockFilter; -use dashcore::block::Block; -use dashcore::network::message::NetworkMessage; -use dashcore::network::message_blockdata::Inventory; -use std::collections::HashMap; -use std::time::Instant; - -use super::manager::SyncManager; -use super::phases::SyncPhase; -use crate::error::{SyncError, SyncResult}; -use crate::network::{Message, NetworkManager}; -use crate::storage::StorageManager; -use crate::types::HashedBlock; -use key_wallet_manager::wallet_interface::WalletInterface; -use key_wallet_manager::wallet_manager::{check_compact_filters_for_addresses, FilterMatchKey}; - -impl SyncManager { - /// Handle incoming network messages with phase filtering - pub async fn handle_message( - &mut self, - message: &Message, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - // Special handling for blocks - they can arrive at any time due to filter matches - if let NetworkMessage::Block(block) = message.inner() { - // Always handle blocks when they arrive, regardless of phase - // This is important because we request blocks when filters match - tracing::info!( - "📦 Received block {} (current phase: {})", - block.block_hash(), - self.current_phase.name() - ); - - // If we're in the DownloadingBlocks phase, handle it there - return if matches!(self.current_phase, SyncPhase::DownloadingBlocks { .. }) { - self.handle_block_message(block, network, storage).await - } else if matches!(self.current_phase, SyncPhase::DownloadingMnList { .. }) { - // During masternode sync, blocks are not processed - tracing::debug!("Block received during MnList phase - ignoring"); - Ok(()) - } else { - // Otherwise, just track that we received it but don't process for phase transitions - // The block will be processed by the client's block processor - tracing::debug!("Block received outside of DownloadingBlocks phase - will be processed by block processor"); - Ok(()) - }; - } - - // Check if this message is expected in the current phase - if !self.is_message_expected_in_phase(message.inner()) { - tracing::debug!( - "Ignoring unexpected {:?} message in phase {}", - std::mem::discriminant(message.inner()), - self.current_phase.name() - ); - return Ok(()); - } - - // Route to appropriate handler based on current phase - match (&mut self.current_phase, message.inner()) { - ( - SyncPhase::DownloadingHeaders { - .. - }, - NetworkMessage::Headers(headers), - ) => { - self.handle_headers_message(headers, network, storage).await?; - } - ( - SyncPhase::DownloadingMnList { - .. - }, - NetworkMessage::MnListDiff(diff), - ) => { - self.handle_mnlistdiff_message(diff, network, storage).await?; - } - - ( - SyncPhase::DownloadingCFHeaders { - .. - }, - NetworkMessage::CFHeaders(cfheaders), - ) => { - tracing::debug!( - "📨 Received CFHeaders ({} headers) from {} (stop_hash={})", - cfheaders.filter_hashes.len(), - message.peer_address(), - cfheaders.stop_hash - ); - self.handle_cfheaders_message(cfheaders, network, storage).await?; - } - - ( - SyncPhase::DownloadingFilters { - .. 
- }, - NetworkMessage::CFilter(cfilter), - ) => { - self.handle_cfilter_message(cfilter, network, storage).await?; - } - - // Handle headers when fully synced (from new block announcements) - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::Headers(headers), - ) => { - self.handle_new_headers(headers, network, storage).await?; - } - - // Handle filter headers when fully synced - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::CFHeaders(cfheaders), - ) => { - self.handle_post_sync_cfheaders(cfheaders, network, storage).await?; - } - - // Handle filters when fully synced - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::CFilter(cfilter), - ) => { - self.handle_post_sync_cfilter(cfilter, network, storage).await?; - } - - // Handle masternode diffs when fully synced (for ChainLock validation) - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::MnListDiff(diff), - ) => { - self.handle_post_sync_mnlistdiff(diff, network, storage).await?; - } - - // Handle QRInfo in masternode downloading phase - ( - SyncPhase::DownloadingMnList { - .. - }, - NetworkMessage::QRInfo(qr_info), - ) => { - self.handle_qrinfo_message(qr_info, network, storage).await?; - } - - // Handle QRInfo when fully synced - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::QRInfo(qr_info), - ) => { - self.handle_qrinfo_message(qr_info, network, storage).await?; - } - - _ => { - tracing::debug!("Message type not handled in current phase"); - } - } - - Ok(()) - } - - /// Check if a message is expected in the current phase - fn is_message_expected_in_phase(&self, message: &NetworkMessage) -> bool { - match (&self.current_phase, message) { - ( - SyncPhase::DownloadingHeaders { - .. - }, - NetworkMessage::Headers(_), - ) => true, - ( - SyncPhase::DownloadingMnList { - .. - }, - NetworkMessage::MnListDiff(_), - ) => true, - ( - SyncPhase::DownloadingMnList { - .. - }, - NetworkMessage::QRInfo(_), - ) => true, // Allow QRInfo during masternode sync - ( - SyncPhase::DownloadingMnList { - .. - }, - NetworkMessage::Block(_), - ) => true, // Allow blocks during masternode sync - ( - SyncPhase::DownloadingCFHeaders { - .. - }, - NetworkMessage::CFHeaders(_), - ) => true, - ( - SyncPhase::DownloadingFilters { - .. - }, - NetworkMessage::CFilter(_), - ) => true, - ( - SyncPhase::DownloadingBlocks { - .. - }, - NetworkMessage::Block(_), - ) => true, - // During FullySynced phase, we need to accept sync maintenance messages - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::Headers(_), - ) => true, - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::CFHeaders(_), - ) => true, - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::CFilter(_), - ) => true, - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::MnListDiff(_), - ) => true, - ( - SyncPhase::FullySynced { - .. - }, - NetworkMessage::QRInfo(_), - ) => true, // Allow QRInfo when fully synced - _ => false, - } - } - - pub(super) async fn handle_headers_message( - &mut self, - headers: &[dashcore::block::Header], - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - let continue_sync = - self.header_sync.handle_headers_message(headers, storage, network).await?; - - self.finalize_headers_sync( - continue_sync, - headers.len() as u32, - network, - storage, - "Headers sync complete", - ) - .await - } - - /// Common logic for finalizing header sync after processing headers (regular or compressed). - /// Updates phase state, marks completion, and triggers phase transition if needed. 
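-    // Shared by the plain `Headers` handler above and any compressed-headers
-    // path, so completion detection (`received_empty_response`) lives in one place.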
- async fn finalize_headers_sync( - &mut self, - continue_sync: bool, - headers_count: u32, - network: &mut N, - storage: &mut S, - transition_reason: &str, - ) -> SyncResult<()> { - let blockchain_height = self.get_blockchain_height_from_storage(storage).await; - - let should_transition = if let SyncPhase::DownloadingHeaders { - current_height, - headers_downloaded, - start_time, - headers_per_second, - received_empty_response, - last_progress, - .. - } = &mut self.current_phase - { - *current_height = blockchain_height; - - *headers_downloaded += headers_count; - let elapsed = start_time.elapsed().as_secs_f64(); - if elapsed > 0.0 { - *headers_per_second = *headers_downloaded as f64 / elapsed; - } - - // Mark sync complete - this flag is checked by are_headers_complete() during transition - if !continue_sync { - *received_empty_response = true; - } - - *last_progress = Instant::now(); - - !continue_sync - } else { - false - }; - - if should_transition { - self.transition_to_next_phase(storage, network, transition_reason).await?; - self.execute_current_phase(network, storage).await?; - } - - Ok(()) - } - - pub(super) async fn handle_mnlistdiff_message( - &mut self, - diff: &dashcore::network::message_sml::MnListDiff, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - self.masternode_sync.handle_mnlistdiff_message(diff, storage, network).await?; - - // Update phase state - if let SyncPhase::DownloadingMnList { - current_height, - diffs_processed, - .. - } = &mut self.current_phase - { - // Update current height from storage - if let Ok(Some(state)) = storage.load_masternode_state().await { - *current_height = state.last_height; - } - - *diffs_processed += 1; - self.current_phase.update_progress(); - - // Check if phase is complete by verifying masternode sync is no longer in progress - // This ensures we wait for all pending MnListDiff requests to be received - if !self.masternode_sync.is_syncing() { - // Masternode sync has completed - ensure phase state reflects this - // by updating target_height to match current_height before transition - if let SyncPhase::DownloadingMnList { - current_height, - target_height, - .. 
- } = &mut self.current_phase - { - // Force completion state by ensuring current >= target - if *current_height < *target_height { - *target_height = *current_height; - } - } - - tracing::info!("✅ All MnListDiff requests completed, transitioning to next phase"); - self.transition_to_next_phase(storage, network, "Masternode sync complete").await?; - - // Execute the next phase - self.execute_current_phase(network, storage).await?; - } - } - - Ok(()) - } - - pub(super) async fn handle_qrinfo_message( - &mut self, - qr_info: &dashcore::network::message_qrinfo::QRInfo, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - tracing::info!("🔄 Sequential sync manager handling QRInfo message (unified processing)"); - - // Get sync base height for height conversion - let sync_base_height = self.header_sync.get_sync_base_height(); - tracing::debug!( - "Using sync_base_height={} for masternode validation height conversion", - sync_base_height - ); - - // Process QRInfo with full block height feeding and comprehensive processing - self.masternode_sync.handle_qrinfo_message(qr_info.clone(), storage, network).await; - - // Check if QRInfo processing completed successfully - if let Some(error) = self.masternode_sync.last_error() { - tracing::error!("❌ QRInfo processing failed: {}", error); - return Err(SyncError::Validation(error.to_string())); - } - - // Update phase state - if let SyncPhase::DownloadingMnList { - current_height, - diffs_processed, - .. - } = &mut self.current_phase - { - // Update current height from storage - if let Ok(Some(state)) = storage.load_masternode_state().await { - *current_height = state.last_height; - } - *diffs_processed += 1; - self.current_phase.update_progress(); - - // Check if masternode sync is complete (all pending MnListDiff requests received) - if !self.masternode_sync.is_syncing() { - tracing::info!("✅ QRInfo processing completed with all MnListDiff requests, masternode sync phase finished"); - - // Transition to next phase (filter headers) - self.transition_to_next_phase(storage, network, "QRInfo processing completed") - .await?; - - // Immediately execute the next phase so CFHeaders begins without delay - self.execute_current_phase(network, storage).await?; - } else { - tracing::info!( - "⏳ QRInfo processing completed, waiting for pending MnListDiff responses before transitioning" - ); - } - } - - Ok(()) - } - - pub(super) async fn handle_cfheaders_message( - &mut self, - cfheaders: &dashcore::network::message_filter::CFHeaders, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - let continue_sync = - self.filter_sync.handle_cfheaders_message(cfheaders.clone(), storage, network).await?; - - // Update phase state - if let SyncPhase::DownloadingCFHeaders { - current_height, - cfheaders_downloaded, - start_time, - cfheaders_per_second, - .. 
- } = &mut self.current_phase - { - // Update current height - if let Ok(Some(tip)) = storage.get_filter_tip_height().await { - *current_height = tip; - } - - // Update progress - *cfheaders_downloaded += cfheaders.filter_hashes.len() as u32; - let elapsed = start_time.elapsed().as_secs_f64(); - if elapsed > 0.0 { - *cfheaders_per_second = *cfheaders_downloaded as f64 / elapsed; - } - - self.current_phase.update_progress(); - - // Check if phase is complete - if !continue_sync { - self.transition_to_next_phase(storage, network, "Filter headers sync complete") - .await?; - - // Execute the next phase - self.execute_current_phase(network, storage).await?; - } - } - - Ok(()) - } - - pub(super) async fn handle_cfilter_message( - &mut self, - cfilter: &dashcore::network::message_filter::CFilter, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - tracing::debug!("📨 Received CFilter for block {}", cfilter.block_hash); - - // Check filter against wallet if available - // First, verify filter data matches expected filter header chain - let height = storage - .get_header_height_by_hash(&cfilter.block_hash) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter block height: {}", e)))? - .ok_or_else(|| { - SyncError::Validation(format!( - "Block height not found for cfilter block {}", - cfilter.block_hash - )) - })?; - - let header_ok = self - .filter_sync - .verify_cfilter_against_headers(&cfilter.filter, height, &*storage) - .await?; - - if !header_ok { - tracing::warn!( - "Rejecting CFilter for block {} at height {} due to header mismatch", - cfilter.block_hash, - height - ); - return Ok(()); - } - - // Store the verified filter to disk - storage - .store_filter(height, &cfilter.filter) - .await - .map_err(|e| SyncError::Storage(format!("Failed to store filter: {}", e)))?; - - self.wallet.write().await.update_synced_height(height); - - let key = FilterMatchKey::new(height, cfilter.block_hash); - let input = HashMap::from([(key, BlockFilter::new(&cfilter.filter))]); - let addresses = self.wallet.read().await.monitored_addresses(); - let matches = check_compact_filters_for_addresses(&input, addresses); - - if !matches.is_empty() { - tracing::info!("🎯 Filter match found! Requesting block {}", cfilter.block_hash); - // Request the full block - let inv = Inventory::Block(cfilter.block_hash); - network - .send_message(NetworkMessage::GetData(vec![inv])) - .await - .map_err(|e| SyncError::Network(format!("Failed to request block: {}", e)))?; - } - - // Handle filter message tracking - self.filter_sync.mark_filter_received(cfilter.block_hash, storage).await?; - - // Send more filter requests from the queue if we have available slots - if self.filter_sync.has_pending_filter_requests() { - let available_slots = self.filter_sync.get_available_request_slots(); - if available_slots > 0 { - tracing::debug!( - "Sending more filter requests: {} slots available, {} pending", - available_slots, - self.filter_sync.pending_download_count() - ); - self.filter_sync.send_next_filter_batch(network).await?; - } else { - tracing::trace!( - "No available slots for more filter requests (all {} slots in use)", - self.filter_sync.active_request_count() - ); - } - } else { - tracing::trace!("No more pending filter requests in queue"); - } - - // Update phase state - if let SyncPhase::DownloadingFilters { - completed_heights, - batches_processed, - total_filters, - .. 
- } = &mut self.current_phase - { - // Mark this height as completed - if let Ok(Some(height)) = storage.get_header_height_by_hash(&cfilter.block_hash).await { - completed_heights.insert(height); - - // Log progress periodically - if completed_heights.len() % 100 == 0 - || completed_heights.len() == *total_filters as usize - { - tracing::info!( - "📊 Filter download progress: {}/{} filters received", - completed_heights.len(), - total_filters - ); - } - } - - *batches_processed += 1; - self.current_phase.update_progress(); - - // Check if all filters are downloaded - // We need to track actual completion, not just request status - if let SyncPhase::DownloadingFilters { - total_filters, - completed_heights, - .. - } = &self.current_phase - { - // We need to check: - // 1. All expected filters have been received (completed_heights matches total_filters) - // 2. No more active or pending requests - let has_pending = self.filter_sync.pending_download_count() > 0 - || self.filter_sync.active_request_count() > 0; - - let all_received = - *total_filters > 0 && completed_heights.len() >= *total_filters as usize; - - // Only transition when we've received all filters AND no requests are pending - if all_received && !has_pending { - tracing::info!( - "All {} filters received and processed", - completed_heights.len() - ); - self.transition_to_next_phase(storage, network, "All filters downloaded") - .await?; - - // Execute the next phase - self.execute_current_phase(network, storage).await?; - } else if *total_filters == 0 && !has_pending { - // Edge case: no filters to download - self.transition_to_next_phase(storage, network, "No filters to download") - .await?; - - // Execute the next phase - self.execute_current_phase(network, storage).await?; - } else { - tracing::trace!( - "Filter sync progress: {}/{} received, {} active requests", - completed_heights.len(), - total_filters, - self.filter_sync.active_request_count() - ); - } - } - } - - Ok(()) - } - - pub(super) async fn handle_block_message( - &mut self, - block: &Block, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - let block_hash = block.block_hash(); - - // Process the block through the wallet if available - let mut wallet = self.wallet.write().await; - - // Get the block height from storage - let block_height = storage - .get_header_height_by_hash(&block_hash) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get block height: {}", e)))? - .unwrap_or(0); - - let result = wallet.process_block(block, block_height).await; - - storage - .store_block(block_height, HashedBlock::from(block)) - .await - .map_err(|e| SyncError::Storage(e.to_string()))?; - - drop(wallet); - - let total_relevant = result.relevant_tx_count(); - if total_relevant > 0 { - tracing::info!( - "Found {} relevant transactions ({} new, {} existing) in block {} at height {}", - total_relevant, - result.new_txids.len(), - result.existing_txids.len(), - block_hash, - block_height - ); - for txid in result.relevant_txids() { - tracing::debug!(" - Transaction: {}", txid); - } - } - - // Handle block download and check if we need to transition - let should_transition = if let SyncPhase::DownloadingBlocks { - downloading, - completed, - last_progress, - .. 
- } = &mut self.current_phase - { - // Remove from downloading - downloading.remove(&block_hash); - - // Add to completed - completed.push(block_hash); - - // Update progress time - *last_progress = Instant::now(); - - // Check if all blocks are downloaded - downloading.is_empty() && self.no_more_pending_blocks() - } else { - false - }; - - if should_transition { - self.transition_to_next_phase(storage, network, "All blocks downloaded").await?; - - // Execute the next phase (if any) - self.execute_current_phase(network, storage).await?; - } - - Ok(()) - } -} diff --git a/dash-spv/src/sync/legacy/mod.rs b/dash-spv/src/sync/legacy/mod.rs deleted file mode 100644 index dbacd3c57..000000000 --- a/dash-spv/src/sync/legacy/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Legacy synchronization modules for the Dash SPV client. -//! -//! This module contains the original sync implementation with sequential -//! phase-based synchronization. - -// Submodules -pub mod filters; -pub mod headers; -pub mod masternodes; - -// Sequential sync pipeline modules -pub mod manager; -pub mod message_handlers; -pub mod phase_execution; -pub mod phases; -pub mod post_sync; -pub mod transitions; - -// Re-exports -pub use filters::FilterSyncManager; -pub use headers::{HeaderSyncManager, ReorgConfig}; -pub use manager::SyncManager; -pub use masternodes::MasternodeSyncManager; -pub use phases::{PhaseTransition, SyncPhase}; -pub use transitions::TransitionManager; diff --git a/dash-spv/src/sync/legacy/phase_execution.rs b/dash-spv/src/sync/legacy/phase_execution.rs deleted file mode 100644 index 5922e43ca..000000000 --- a/dash-spv/src/sync/legacy/phase_execution.rs +++ /dev/null @@ -1,486 +0,0 @@ -//! Phase execution, transitions, timeout handling, and recovery logic. - -use std::time::Instant; - -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use key_wallet_manager::wallet_interface::WalletInterface; - -use super::manager::SyncManager; -use super::phases::SyncPhase; - -impl SyncManager { - /// Execute the current sync phase - pub(super) async fn execute_current_phase( - &mut self, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - match &self.current_phase { - SyncPhase::DownloadingHeaders { - .. - } => { - tracing::info!("📥 Starting header download phase"); - // Don't call start_sync if already prepared - just send the request - if self.header_sync.is_syncing() { - // Already prepared, just send the initial request - let base_hash = self.get_base_hash_from_storage(storage).await?; - - self.header_sync.request_headers(network, base_hash, storage).await?; - } else { - // Not prepared yet, start sync normally - self.header_sync.start_sync(network, storage).await?; - } - } - - SyncPhase::DownloadingMnList { - .. - } => { - tracing::info!("📥 Starting masternode list download phase"); - - // Start masternode sync (unified processing) - match self.masternode_sync.start_sync(network, storage).await { - Ok(_) => { - tracing::info!("🚀 Masternode sync initiated successfully, will complete when QRInfo arrives"); - } - Err(e) => { - tracing::error!("❌ Failed to start masternode sync: {}", e); - return Err(e); - } - } - } - - SyncPhase::DownloadingCFHeaders { - .. 
- } => { - tracing::info!("📥 Starting filter header download phase"); - - // Get sync base height from header sync - let sync_base_height = self.header_sync.get_sync_base_height(); - if sync_base_height > 0 { - tracing::info!( - "Setting filter sync base height to {} for checkpoint sync", - sync_base_height - ); - self.filter_sync.set_sync_base_height(sync_base_height); - } - - let sync_started = - self.filter_sync.start_sync_filter_headers(network, storage).await?; - - if !sync_started { - // No peers support compact filters or already up to date - tracing::info!("Filter header sync not started (no peers support filters or already synced)"); - // Transition to next phase immediately - self.transition_to_next_phase( - storage, - network, - "Filter sync skipped - no peer support", - ) - .await?; - // Return early to let the main sync loop execute the next phase - return Ok(()); - } - } - - SyncPhase::DownloadingFilters { - .. - } => { - tracing::info!("📥 Starting filter download phase"); - - // Get the range of filters to download - // Note: get_filter_tip_height() now returns absolute blockchain height - let filter_header_tip = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter tip: {}", e)))? - .unwrap_or(0); - - if filter_header_tip > 0 { - // Download all filters for complete blockchain history - // This ensures the wallet can find transactions from any point in history - let start_height = self.header_sync.get_sync_base_height().max(1); - let count = filter_header_tip - start_height + 1; - - tracing::info!( - "Starting filter download from height {} to {} ({} filters)", - start_height, - filter_header_tip, - count - ); - - // Update the phase to track the expected total - if let SyncPhase::DownloadingFilters { - total_filters, - .. - } = &mut self.current_phase - { - *total_filters = count; - } - - // Use the filter sync manager to download filters - self.filter_sync - .sync_filters(network, storage, Some(start_height), Some(count)) - .await?; - } else { - // No filter headers available, skip to next phase - self.transition_to_next_phase(storage, network, "No filter headers available") - .await?; - } - } - - SyncPhase::DownloadingBlocks { - .. - } => { - tracing::info!("📥 Starting block download phase"); - // Block download will be initiated based on filter matches - // For now, we'll complete the sync - self.transition_to_next_phase(storage, network, "No blocks to download").await?; - } - - SyncPhase::Idle - | SyncPhase::FullySynced { - .. - } => { - // Nothing to execute - } - } - - Ok(()) - } - - /// Transition to the next phase - pub(super) async fn transition_to_next_phase( - &mut self, - storage: &mut S, - network: &N, - reason: &str, - ) -> SyncResult<()> { - // Get the next phase - let next_phase = - self.transition_manager.get_next_phase(&self.current_phase, storage, network).await?; - - if let Some(next) = next_phase { - // Check if transition is allowed - if !self - .transition_manager - .can_transition_to(&self.current_phase, &next, storage) - .await? 
- { - return Err(SyncError::Validation(format!( - "Invalid phase transition from {} to {}", - self.current_phase.name(), - next.name() - ))); - } - - // Create transition record - let transition = self.transition_manager.create_transition( - &self.current_phase, - &next, - reason.to_string(), - ); - - tracing::info!( - "🔄 Phase transition: {} → {} (reason: {})", - transition.from_phase, - transition.to_phase, - transition.reason - ); - - // Log final progress of the phase - if let Some(ref progress) = transition.final_progress { - tracing::info!( - "📊 Phase {} completed: {} items in {:?} ({:.1} items/sec)", - transition.from_phase, - progress.items_completed, - progress.elapsed, - progress.rate - ); - } - - self.phase_history.push(transition); - self.current_phase = next; - self.current_phase_retries = 0; - - // Start the next phase - // Note: We can't execute the next phase here as we don't have network access - // The caller will need to execute the next phase - } else { - tracing::info!("✅ Sequential sync complete!"); - - // Calculate total sync stats - if let Some(start_time) = self.sync_start_time { - let total_time = start_time.elapsed(); - let headers_synced = self.calculate_total_headers_synced(); - let filters_synced = self.calculate_total_filters_synced(); - let blocks_downloaded = self.calculate_total_blocks_downloaded(); - - self.current_phase = SyncPhase::FullySynced { - sync_completed_at: Instant::now(), - total_sync_time: total_time, - headers_synced, - filters_synced, - blocks_downloaded, - }; - - tracing::info!( - "🎉 Sync completed in {:?} - {} headers, {} filters, {} blocks", - total_time, - headers_synced, - filters_synced, - blocks_downloaded - ); - } - } - - Ok(()) - } - - /// Check for timeouts and handle recovery - pub async fn check_timeout(&mut self, network: &mut N, storage: &mut S) -> SyncResult<()> { - // First check if the current phase needs to be executed (e.g., after a transition) - if self.current_phase_needs_execution() { - tracing::info!("Executing phase {} after transition", self.current_phase.name()); - self.execute_current_phase(network, storage).await?; - return Ok(()); - } - - if let Some(last_progress) = self.current_phase.last_progress_time() { - if last_progress.elapsed() > self.phase_timeout { - tracing::warn!( - "⏰ Phase {} timed out after {:?}", - self.current_phase.name(), - self.phase_timeout - ); - - // Attempt recovery - self.recover_from_timeout(network, storage).await?; - } - } - - // Also check phase-specific timeouts - match &self.current_phase { - SyncPhase::DownloadingHeaders { - .. - } => { - self.header_sync.check_sync_timeout(storage, network).await?; - } - SyncPhase::DownloadingCFHeaders { - .. - } => { - self.filter_sync.check_cfheader_request_timeouts(network, storage).await?; - } - SyncPhase::DownloadingMnList { - .. - } => { - self.masternode_sync.check_sync_timeout(storage, network).await?; - - // After checking timeout, see if sync completed (either normally or via timeout) - if !self.masternode_sync.is_syncing() { - tracing::info!("Masternode sync completed (detected in timeout check), transitioning to next phase"); - self.transition_to_next_phase(storage, network, "Masternode sync complete") - .await?; - self.execute_current_phase(network, storage).await?; - } - } - SyncPhase::DownloadingFilters { - .. 
- } => { - // Always check for timed out filter requests, not just during phase timeout - self.filter_sync.check_filter_request_timeouts(network, storage).await?; - - // For filter downloads, we need custom timeout handling - // since the filter sync manager's timeout is for filter headers - if let Some(last_progress) = self.current_phase.last_progress_time() { - if last_progress.elapsed() > self.phase_timeout { - tracing::warn!( - "⏰ Filter download phase timed out after {:?}", - self.phase_timeout - ); - - // Check if we have any active requests - let active_count = self.filter_sync.active_request_count(); - let pending_count = self.filter_sync.pending_download_count(); - - tracing::warn!( - "Filter sync status: {} active requests, {} pending", - active_count, - pending_count - ); - - // First check for timed out filter requests - self.filter_sync.check_filter_request_timeouts(network, storage).await?; - - // Try to recover by sending more requests if we have pending ones - if self.filter_sync.has_pending_filter_requests() && active_count < 10 { - tracing::info!("Attempting to recover by sending more filter requests"); - self.filter_sync.send_next_filter_batch(network).await?; - self.current_phase.update_progress(); - } else if active_count == 0 - && !self.filter_sync.has_pending_filter_requests() - { - // No active requests and no pending - we're stuck - tracing::error!( - "Filter sync stalled with no active or pending requests" - ); - - // Check if we received some filters but not all - let received_count = self.filter_sync.get_received_filter_count(); - if let SyncPhase::DownloadingFilters { - total_filters, - .. - } = &self.current_phase - { - if received_count > 0 && received_count < *total_filters { - tracing::warn!( - "Filter sync stalled at {}/{} filters - attempting recovery", - received_count, total_filters - ); - - // Retry the entire filter sync phase - self.current_phase_retries += 1; - if self.current_phase_retries <= self.max_phase_retries { - tracing::info!( - "🔄 Retrying filter sync (attempt {}/{})", - self.current_phase_retries, - self.max_phase_retries - ); - - // Clear the filter sync state and restart - self.filter_sync.reset(); - self.filter_sync.set_syncing_filters(false); // Allow restart - - // Update progress to prevent immediate timeout - self.current_phase.update_progress(); - - // Re-execute the phase - self.execute_current_phase(network, storage).await?; - return Ok(()); - } else { - tracing::error!( - "Filter sync failed after {} retries, forcing completion", - self.max_phase_retries - ); - } - } - } - - // Force transition to next phase to avoid permanent stall - self.transition_to_next_phase( - storage, - network, - "Filter sync timeout - forcing completion", - ) - .await?; - self.execute_current_phase(network, storage).await?; - } - } - } - } - SyncPhase::Idle - | SyncPhase::FullySynced { - .. - } - | SyncPhase::DownloadingBlocks { - .. 
- } => { - // Nothing to execute - } - } - - Ok(()) - } - - /// Recover from a timeout - async fn recover_from_timeout(&mut self, network: &mut N, storage: &mut S) -> SyncResult<()> { - self.current_phase_retries += 1; - - if self.current_phase_retries > self.max_phase_retries { - return Err(SyncError::Timeout(format!( - "Phase {} failed after {} retries", - self.current_phase.name(), - self.max_phase_retries - ))); - } - - tracing::warn!( - "🔄 Retrying phase {} (attempt {}/{})", - self.current_phase.name(), - self.current_phase_retries, - self.max_phase_retries - ); - - // Update progress time to prevent immediate re-timeout - self.current_phase.update_progress(); - - // Execute phase-specific recovery - match &self.current_phase { - SyncPhase::DownloadingHeaders { - .. - } => { - self.header_sync.check_sync_timeout(storage, network).await?; - } - SyncPhase::DownloadingMnList { - .. - } => { - self.masternode_sync.check_sync_timeout(storage, network).await?; - } - SyncPhase::DownloadingCFHeaders { - .. - } => { - self.filter_sync.check_cfheader_request_timeouts(network, storage).await?; - } - SyncPhase::Idle - | SyncPhase::DownloadingFilters { - .. - } - | SyncPhase::DownloadingBlocks { - .. - } - | SyncPhase::FullySynced { - .. - } => { - // For other phases, we'll need phase-specific recovery - } - } - - Ok(()) - } - - // Helper methods for calculating totals - - pub(super) fn calculate_total_headers_synced(&self) -> u32 { - self.phase_history - .iter() - .find(|t| t.from_phase == "Downloading Headers") - .and_then(|t| t.final_progress.as_ref()) - .map(|p| p.items_completed) - .unwrap_or(0) - } - - pub(super) fn calculate_total_filters_synced(&self) -> u32 { - self.phase_history - .iter() - .find(|t| t.from_phase == "Downloading Filters") - .and_then(|t| t.final_progress.as_ref()) - .map(|p| p.items_completed) - .unwrap_or(0) - } - - pub(super) fn calculate_total_blocks_downloaded(&self) -> u32 { - self.phase_history - .iter() - .find(|t| t.from_phase == "Downloading Blocks") - .and_then(|t| t.final_progress.as_ref()) - .map(|p| p.items_completed) - .unwrap_or(0) - } - - pub(super) fn no_more_pending_blocks(&self) -> bool { - // This would check if there are more blocks to download - // For now, return true - true - } -} diff --git a/dash-spv/src/sync/legacy/phases.rs b/dash-spv/src/sync/legacy/phases.rs deleted file mode 100644 index 19f329de5..000000000 --- a/dash-spv/src/sync/legacy/phases.rs +++ /dev/null @@ -1,459 +0,0 @@ -//! 
Phase definitions for sequential sync - -use std::collections::{HashMap, HashSet}; -use std::time::{Duration, Instant}; - -use dashcore::BlockHash; - -/// Represents the current synchronization phase -#[derive(Debug, Clone, PartialEq)] -pub enum SyncPhase { - /// Not currently syncing - Idle, - - /// Phase 1: Downloading block headers - DownloadingHeaders { - /// When this phase started - start_time: Instant, - /// Height when sync started - start_height: u32, - /// Current synchronized height - current_height: u32, - /// Target height (if known from peer announcements) - target_height: Option, - /// Last time we made progress - last_progress: Instant, - /// Headers downloaded in this phase - headers_downloaded: u32, - /// Average headers per second - headers_per_second: f64, - /// Whether we've received an empty headers response (indicating completion) - received_empty_response: bool, - }, - - /// Phase 2: Downloading masternode lists - DownloadingMnList { - /// When this phase started - start_time: Instant, - /// Starting height for masternode sync - start_height: u32, - /// Current masternode list height - current_height: u32, - /// Target height (should match header tip) - target_height: u32, - /// Last time we made progress - last_progress: Instant, - /// Number of masternode list diffs processed - diffs_processed: u32, - /// Total requests (QRInfo + MnListDiff) - requests_total: u32, - /// Completed requests - requests_completed: u32, - }, - - /// Phase 3: Downloading compact filter headers - DownloadingCFHeaders { - /// When this phase started - start_time: Instant, - /// Starting height - start_height: u32, - /// Current filter header height - current_height: u32, - /// Target height (should match header tip) - target_height: u32, - /// Last time we made progress - last_progress: Instant, - /// Filter headers downloaded in this phase - cfheaders_downloaded: u32, - /// Average filter headers per second - cfheaders_per_second: f64, - }, - - /// Phase 4: Downloading compact filters - DownloadingFilters { - /// When this phase started - start_time: Instant, - /// Filter ranges that have been requested: (start, end) -> request time - requested_ranges: HashMap<(u32, u32), Instant>, - /// Heights for which filters have been downloaded - completed_heights: HashSet, - /// Total number of filters to download - total_filters: u32, - /// Last time we made progress - last_progress: Instant, - /// Number of filter batches processed - batches_processed: u32, - }, - - /// Phase 5: Downloading full blocks - DownloadingBlocks { - /// When this phase started - start_time: Instant, - /// Blocks pending download: (hash, height) - pending_blocks: Vec<(BlockHash, u32)>, - /// Currently downloading blocks: hash -> request time - downloading: HashMap, - /// Successfully downloaded blocks - completed: Vec, - /// Last time we made progress - last_progress: Instant, - /// Total blocks to download - total_blocks: usize, - }, - - /// Fully synchronized with the network - FullySynced { - /// When sync completed - sync_completed_at: Instant, - /// Total time taken to sync - total_sync_time: Duration, - /// Number of headers synced - headers_synced: u32, - /// Number of filters synced - filters_synced: u32, - /// Number of blocks downloaded - blocks_downloaded: u32, - }, -} - -impl SyncPhase { - /// Get a human-readable name for the phase - pub fn name(&self) -> &'static str { - match self { - SyncPhase::Idle => "Idle", - SyncPhase::DownloadingHeaders { - .. 
- } => "Downloading Headers", - SyncPhase::DownloadingMnList { - .. - } => "Downloading Masternode Lists", - SyncPhase::DownloadingCFHeaders { - .. - } => "Downloading Filter Headers", - SyncPhase::DownloadingFilters { - .. - } => "Downloading Filters", - SyncPhase::DownloadingBlocks { - .. - } => "Downloading Blocks", - SyncPhase::FullySynced { - .. - } => "Fully Synced", - } - } - - /// Check if this phase is actively syncing - pub fn is_syncing(&self) -> bool { - !matches!(self, SyncPhase::Idle | SyncPhase::FullySynced { .. }) - } - - /// Get the last progress time for timeout detection - pub fn last_progress_time(&self) -> Option { - match self { - SyncPhase::DownloadingHeaders { - last_progress, - .. - } => Some(*last_progress), - SyncPhase::DownloadingMnList { - last_progress, - .. - } => Some(*last_progress), - SyncPhase::DownloadingCFHeaders { - last_progress, - .. - } => Some(*last_progress), - SyncPhase::DownloadingFilters { - last_progress, - .. - } => Some(*last_progress), - SyncPhase::DownloadingBlocks { - last_progress, - .. - } => Some(*last_progress), - SyncPhase::Idle - | SyncPhase::FullySynced { - .. - } => None, - } - } - - /// Update the last progress time - pub fn update_progress(&mut self) { - let now = Instant::now(); - match self { - SyncPhase::DownloadingHeaders { - last_progress, - .. - } => *last_progress = now, - SyncPhase::DownloadingMnList { - last_progress, - .. - } => *last_progress = now, - SyncPhase::DownloadingCFHeaders { - last_progress, - .. - } => *last_progress = now, - SyncPhase::DownloadingFilters { - last_progress, - .. - } => *last_progress = now, - SyncPhase::DownloadingBlocks { - last_progress, - .. - } => *last_progress = now, - SyncPhase::Idle - | SyncPhase::FullySynced { - .. - } => {} - } - } -} - -/// Progress information for a sync phase -#[derive(Debug, Clone)] -pub struct PhaseProgress { - /// Name of the phase - pub phase_name: &'static str, - /// Number of items completed - pub items_completed: u32, - /// Total items expected (if known) - pub items_total: Option, - /// Completion percentage (0-100) - pub percentage: f64, - /// Processing rate (items per second) - pub rate: f64, - /// Estimated time remaining - pub eta: Option, - /// Time elapsed in this phase - pub elapsed: Duration, -} - -impl SyncPhase { - /// Calculate progress for the current phase - pub fn progress(&self) -> PhaseProgress { - match self { - SyncPhase::DownloadingHeaders { - start_height, - current_height, - target_height, - headers_per_second, - start_time, - .. - } => { - let items_completed = current_height.saturating_sub(*start_height); - let items_total = target_height.map(|t| t.saturating_sub(*start_height)); - let percentage = if let Some(total) = items_total { - if total > 0 { - (items_completed as f64 / total as f64) * 100.0 - } else { - 100.0 - } - } else { - 0.0 - }; - - let eta = if *headers_per_second > 0.0 { - items_total.map(|total| { - let remaining = total.saturating_sub(items_completed); - Duration::from_secs_f64(remaining as f64 / headers_per_second) - }) - } else { - None - }; - - PhaseProgress { - phase_name: self.name(), - items_completed, - items_total, - percentage, - rate: *headers_per_second, - eta, - elapsed: start_time.elapsed(), - } - } - - SyncPhase::DownloadingMnList { - requests_completed, - requests_total, - start_time, - current_height, - start_height, - target_height, - .. 
- } => { - let percentage = if *requests_total > 0 { - (*requests_completed as f64 / *requests_total as f64) * 100.0 - } else if *target_height > *start_height { - let height_progress = current_height.saturating_sub(*start_height) as f64; - let height_total = target_height.saturating_sub(*start_height) as f64; - (height_progress / height_total) * 100.0 - } else { - 0.0 - }; - - let elapsed = start_time.elapsed(); - let rate = if elapsed.as_secs() > 0 && *requests_completed > 0 { - *requests_completed as f64 / elapsed.as_secs() as f64 - } else { - 0.0 - }; - - let eta = if rate > 0.0 && *requests_completed < *requests_total { - let remaining = requests_total.saturating_sub(*requests_completed); - Some(Duration::from_secs((remaining as f64 / rate) as u64)) - } else { - None - }; - - PhaseProgress { - phase_name: self.name(), - items_completed: *requests_completed, - items_total: Some(*requests_total), - percentage, - rate, - eta, - elapsed, - } - } - - SyncPhase::DownloadingCFHeaders { - start_height, - current_height, - target_height, - cfheaders_per_second, - start_time, - .. - } => { - let items_completed = current_height.saturating_sub(*start_height); - let items_total = target_height.saturating_sub(*start_height); - let percentage = if items_total > 0 { - (items_completed as f64 / items_total as f64) * 100.0 - } else { - 100.0 - }; - - let eta = if *cfheaders_per_second > 0.0 { - let remaining = items_total.saturating_sub(items_completed); - Some(Duration::from_secs_f64(remaining as f64 / cfheaders_per_second)) - } else { - None - }; - - PhaseProgress { - phase_name: self.name(), - items_completed, - items_total: Some(items_total), - percentage, - rate: *cfheaders_per_second, - eta, - elapsed: start_time.elapsed(), - } - } - - SyncPhase::DownloadingFilters { - completed_heights, - total_filters, - start_time, - .. - } => { - let items_completed = completed_heights.len() as u32; - let percentage = if *total_filters > 0 { - (items_completed as f64 / *total_filters as f64) * 100.0 - } else { - 0.0 - }; - - let elapsed = start_time.elapsed(); - let rate = if elapsed.as_secs() > 0 { - items_completed as f64 / elapsed.as_secs_f64() - } else { - 0.0 - }; - - let eta = if rate > 0.0 { - let remaining = total_filters.saturating_sub(items_completed); - Some(Duration::from_secs_f64(remaining as f64 / rate)) - } else { - None - }; - - PhaseProgress { - phase_name: self.name(), - items_completed, - items_total: Some(*total_filters), - percentage, - rate, - eta, - elapsed, - } - } - - SyncPhase::DownloadingBlocks { - completed, - total_blocks, - start_time, - .. - } => { - let items_completed = completed.len() as u32; - let items_total = *total_blocks as u32; - let percentage = if items_total > 0 { - (items_completed as f64 / items_total as f64) * 100.0 - } else { - 100.0 - }; - - let elapsed = start_time.elapsed(); - let rate = if elapsed.as_secs() > 0 { - items_completed as f64 / elapsed.as_secs_f64() - } else { - 0.0 - }; - - let eta = if rate > 0.0 { - let remaining = items_total.saturating_sub(items_completed); - Some(Duration::from_secs_f64(remaining as f64 / rate)) - } else { - None - }; - - PhaseProgress { - phase_name: self.name(), - items_completed, - items_total: Some(items_total), - percentage, - rate, - eta, - elapsed, - } - } - - SyncPhase::Idle - | SyncPhase::FullySynced { - .. 
- } => PhaseProgress { - phase_name: self.name(), - items_completed: 0, - items_total: None, - percentage: 0.0, - rate: 0.0, - eta: None, - elapsed: Duration::from_secs(0), - }, - } - } -} - -/// Represents a phase transition in the sync process -#[derive(Debug, Clone)] -pub struct PhaseTransition { - /// The phase we're transitioning from - pub from_phase: String, - /// The phase we're transitioning to - pub to_phase: String, - /// When the transition occurred - pub timestamp: Instant, - /// Reason for the transition - pub reason: String, - /// Progress info at transition time - pub final_progress: Option, -} diff --git a/dash-spv/src/sync/legacy/post_sync.rs b/dash-spv/src/sync/legacy/post_sync.rs deleted file mode 100644 index e18ae7ef3..000000000 --- a/dash-spv/src/sync/legacy/post_sync.rs +++ /dev/null @@ -1,524 +0,0 @@ -//! Post-sync message handlers (messages that arrive after initial sync is complete). - -use dashcore::block::Header as BlockHeader; -use dashcore::network::message::NetworkMessage; -use dashcore::network::message_blockdata::Inventory; -use dashcore::BlockHash; - -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use key_wallet_manager::wallet_interface::WalletInterface; - -use super::manager::{SyncManager, CHAINLOCK_VALIDATION_MASTERNODE_OFFSET}; -use super::phases::SyncPhase; - -impl SyncManager { - /// Handle inventory messages for sequential sync - pub async fn handle_inventory( - &mut self, - inv: Vec, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - // Only process inventory when we're fully synced - if !matches!(self.current_phase, SyncPhase::FullySynced { .. }) { - tracing::debug!("Ignoring inventory during sync phase: {}", self.current_phase.name()); - return Ok(()); - } - - // Process inventory items - for inv_item in inv { - match inv_item { - Inventory::Block(block_hash) => { - tracing::info!("📨 New block announced: {}", block_hash); - - // Get our current tip to use as locator - use the helper method - let base_hash = self.get_base_hash_from_storage(storage).await?; - - // Build locator hashes based on base hash - let locator_hashes = match base_hash { - Some(hash) => { - tracing::info!("📍 Using tip hash as locator: {}", hash); - vec![hash] - } - None => { - // No headers found - this should only happen on initial sync - tracing::info!("📍 No headers found in storage, using empty locator for initial sync"); - Vec::new() - } - }; - - // Request headers starting from our tip - // Use the same protocol version as during initial sync - let get_headers = NetworkMessage::GetHeaders( - dashcore::network::message_blockdata::GetHeadersMessage { - version: dashcore::network::constants::PROTOCOL_VERSION, - locator_hashes, - stop_hash: BlockHash::from_raw_hash(dashcore::hashes::Hash::all_zeros()), - }, - ); - - tracing::info!( - "📤 Sending GetHeaders with protocol version {}", - dashcore::network::constants::PROTOCOL_VERSION - ); - network.send_message(get_headers).await.map_err(|e| { - SyncError::Network(format!("Failed to request headers: {}", e)) - })?; - - // After we receive the header, we'll need to: - // 1. Request filter headers - // 2. Request the filter - // 3. Check if it matches - // 4. 
Request the block if it matches - } - - Inventory::ChainLock(chainlock_hash) => { - tracing::info!("🔒 ChainLock announced: {}", chainlock_hash); - // Request the ChainLock - let get_data = - NetworkMessage::GetData(vec![Inventory::ChainLock(chainlock_hash)]); - network.send_message(get_data).await.map_err(|e| { - SyncError::Network(format!("Failed to request chainlock: {}", e)) - })?; - - // ChainLocks can help us detect if we're behind - // The ChainLock handler will check if we need to catch up - } - - Inventory::InstantSendLock(islock_hash) => { - tracing::info!("⚡ InstantSend lock announced: {}", islock_hash); - // Request the InstantSend lock - let get_data = - NetworkMessage::GetData(vec![Inventory::InstantSendLock(islock_hash)]); - network.send_message(get_data).await.map_err(|e| { - SyncError::Network(format!("Failed to request islock: {}", e)) - })?; - } - - Inventory::Transaction(txid) => { - // We don't track individual transactions in SPV mode - tracing::debug!("Transaction announced: {} (ignored)", txid); - } - - _ => { - tracing::debug!("Unhandled inventory type: {:?}", inv_item); - } - } - } - - Ok(()) - } - - /// Handle new headers that arrive after initial sync (from inventory) - pub async fn handle_new_headers( - &mut self, - headers: &[BlockHeader], - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - // Only process new headers when we're fully synced - if !matches!(self.current_phase, SyncPhase::FullySynced { .. }) { - tracing::debug!( - "Ignoring headers - not in FullySynced phase (current: {})", - self.current_phase.name() - ); - return Ok(()); - } - - if headers.is_empty() { - tracing::debug!("No new headers to process"); - // Check if we might be behind based on ChainLocks we've seen - // This is handled elsewhere, so just return for now - return Ok(()); - } - - tracing::info!("📥 Processing {} new headers after sync", headers.len()); - tracing::info!( - "🔗 First header: {} Last header: {}", - headers.first().map(|h| h.block_hash().to_string()).unwrap_or_default(), - headers.last().map(|h| h.block_hash().to_string()).unwrap_or_default() - ); - - // Store the new headers - storage - .store_headers(headers) - .await - .map_err(|e| SyncError::Storage(format!("Failed to store headers: {}", e)))?; - - // First, check if we need to catch up on masternode lists for ChainLock validation - if self.config.enable_masternodes && !headers.is_empty() { - // Get the current masternode state to check for gaps - let mn_state = storage.load_masternode_state().await.map_err(|e| { - SyncError::Storage(format!("Failed to load masternode state: {}", e)) - })?; - - if let Some(state) = mn_state { - // Get the height of the first new header - let first_height = storage - .get_header_height_by_hash(&headers[0].block_hash()) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get block height: {}", e)))? 
- .ok_or(SyncError::InvalidState("Failed to get block height".to_string()))?; - - // Check if we have a gap (masternode lists are more than 1 block behind) - if state.last_height + 1 < first_height { - let gap_size = first_height - state.last_height - 1; - tracing::warn!( - "⚠️ Detected gap in masternode lists: last height {} vs new block {}, gap of {} blocks", - state.last_height, - first_height, - gap_size - ); - - // Request catch-up masternode diff for the gap - // We need to ensure we have lists for at least the last 8 blocks for ChainLock validation - let catch_up_start = state.last_height; - let catch_up_end = first_height.saturating_sub(1); - - if catch_up_end > catch_up_start { - let base_hash = storage - .get_header(catch_up_start) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get catch-up base block: {}", - e - )) - })? - .map(|h| h.block_hash()) - .ok_or(SyncError::InvalidState( - "Catch-up base block not found".to_string(), - ))?; - - let stop_hash = storage - .get_header(catch_up_end) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get catch-up stop block: {}", - e - )) - })? - .map(|h| h.block_hash()) - .ok_or(SyncError::InvalidState( - "Catch-up stop block not found".to_string(), - ))?; - - tracing::info!( - "📋 Requesting catch-up masternode diff from height {} to {} to fill gap", - catch_up_start, - catch_up_end - ); - - let catch_up_request = NetworkMessage::GetMnListD( - dashcore::network::message_sml::GetMnListDiff { - base_block_hash: base_hash, - block_hash: stop_hash, - }, - ); - - network.send_message(catch_up_request).await.map_err(|e| { - SyncError::Network(format!( - "Failed to request catch-up masternode diff: {}", - e - )) - })?; - } - } - } - } - - for header in headers { - let height = storage - .get_header_height_by_hash(&header.block_hash()) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get block height: {}", e)))? - .ok_or(SyncError::InvalidState("Failed to get block height".to_string()))?; - - // The height from storage is already the absolute blockchain height - let blockchain_height = height; - - tracing::info!("📦 New block at height {}: {}", blockchain_height, header.block_hash()); - - // If we have masternodes enabled, request masternode list updates for ChainLock validation - if self.config.enable_masternodes { - // Use the latest persisted masternode state height as base to guarantee base < stop - let base_height = match storage.load_masternode_state().await { - Ok(Some(state)) => state.last_height, - _ => 0, - }; - - if base_height < height { - let base_block_hash = if base_height > 0 { - storage - .get_header(base_height) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get masternode base block at {}: {}", - base_height, e - )) - })? - .map(|h| h.block_hash()) - .ok_or(SyncError::InvalidState( - "Masternode base block not found".to_string(), - ))? 
- } else { - // Genesis block case - dashcore::blockdata::constants::genesis_block(self.config.network) - .block_hash() - }; - - tracing::info!( - "📋 Requesting masternode list diff for block at height {} (base: {} -> target: {})", - blockchain_height, - base_height, - height - ); - - let getmnlistdiff = - NetworkMessage::GetMnListD(dashcore::network::message_sml::GetMnListDiff { - base_block_hash, - block_hash: header.block_hash(), - }); - - network.send_message(getmnlistdiff).await.map_err(|e| { - SyncError::Network(format!("Failed to request masternode diff: {}", e)) - })?; - } else { - tracing::debug!( - "Skipping masternode diff request: base_height {} >= target height {}", - base_height, - height - ); - } - - // The masternode diff will arrive via handle_message and be processed by masternode_sync - } - - // If we have filters enabled, request filter headers for the new blocks - if self.config.enable_filters { - // Determine stop as the previous block to avoid peer race on newly announced tip - let stop_hash = if height > 0 { - storage - .get_header(height - 1) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get previous block for CFHeaders stop: {}", - e - )) - })? - .map(|h| h.block_hash()) - .ok_or(SyncError::InvalidState( - "Previous block not found for CFHeaders stop".to_string(), - ))? - } else { - dashcore::blockdata::constants::genesis_block(self.config.network).block_hash() - }; - - // Resolve the absolute blockchain height for stop_hash - let stop_height = storage - .get_header_height_by_hash(&stop_hash) - .await - .map_err(|e| { - SyncError::Storage(format!( - "Failed to get stop height for CFHeaders: {}", - e - )) - })? - .ok_or(SyncError::InvalidState("Stop block height not found".to_string()))?; - - // Current filter headers tip (absolute blockchain height) - let filter_tip = storage - .get_filter_tip_height() - .await - .map_err(|e| { - SyncError::Storage(format!("Failed to get filter tip height: {}", e)) - })? 
- .unwrap_or(0); - - // Check if we're already up-to-date before computing start_height - if filter_tip >= stop_height { - tracing::debug!( - "Skipping CFHeaders request: already up-to-date (filter_tip: {}, stop_height: {})", - filter_tip, - stop_height - ); - } else { - // Request from the first missing height after our current filter tip - // We already verified filter_tip < stop_height above - let start_height = filter_tip.saturating_add(1); - - tracing::info!( - "📋 Requesting filter headers up to height {} (start: {}, stop: {})", - stop_height, - start_height, - stop_hash - ); - - let get_cfheaders = NetworkMessage::GetCFHeaders( - dashcore::network::message_filter::GetCFHeaders { - filter_type: 0, // Basic filter - start_height, - stop_hash, - }, - ); - - network.send_message(get_cfheaders).await.map_err(|e| { - SyncError::Network(format!("Failed to request filter headers: {}", e)) - })?; - - // The filter headers will arrive via handle_message, then we'll request filters - } - } - } - - Ok(()) - } - - /// Handle filter headers that arrive after initial sync - pub(super) async fn handle_post_sync_cfheaders( - &mut self, - cfheaders: &dashcore::network::message_filter::CFHeaders, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - tracing::info!("📥 Processing filter headers for new block after sync"); - - // Store the filter headers - let stop_hash = cfheaders.stop_hash; - self.filter_sync.store_filter_headers(cfheaders, storage).await?; - - // Get the height of the stop_hash - if let Some(height) = storage - .get_header_height_by_hash(&stop_hash) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter header height: {}", e)))? - { - // Request the actual filter for this block - let get_cfilters = - NetworkMessage::GetCFilters(dashcore::network::message_filter::GetCFilters { - filter_type: 0, // Basic filter - start_height: height, - stop_hash, - }); - - network - .send_message(get_cfilters) - .await - .map_err(|e| SyncError::Network(format!("Failed to request filters: {}", e)))?; - } - - Ok(()) - } - - /// Handle filters that arrive after initial sync - pub(super) async fn handle_post_sync_cfilter( - &mut self, - cfilter: &dashcore::network::message_filter::CFilter, - _network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - tracing::info!("📥 Processing filter for new block after sync"); - - // Get the height for this filter's block - let height = storage - .get_header_height_by_hash(&cfilter.block_hash) - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter block height: {}", e)))? 
- .ok_or(SyncError::InvalidState("Filter block height not found".to_string()))?; - - // Verify against expected header chain before storing - let header_ok = self - .filter_sync - .verify_cfilter_against_headers(&cfilter.filter, height, &*storage) - .await?; - if !header_ok { - tracing::warn!( - "Rejecting post-sync CFilter for block {} at height {} due to header mismatch", - cfilter.block_hash, - height - ); - return Ok(()); - } - - // Store the filter - storage - .store_filter(height, &cfilter.filter) - .await - .map_err(|e| SyncError::Storage(format!("Failed to store filter: {}", e)))?; - - self.wallet.write().await.update_synced_height(height); - - // TODO: Check filter against wallet instead of watch items - // This will be integrated with wallet's check_compact_filter method - tracing::debug!("Filter checking disabled until wallet integration is complete"); - - Ok(()) - } - - /// Handle masternode list diffs that arrive after initial sync (for ChainLock validation) - pub(super) async fn handle_post_sync_mnlistdiff( - &mut self, - diff: &dashcore::network::message_sml::MnListDiff, - network: &mut N, - storage: &mut S, - ) -> SyncResult<()> { - // Get block heights for better logging (get_header_height_by_hash returns blockchain heights) - let base_blockchain_height = - storage.get_header_height_by_hash(&diff.base_block_hash).await.ok().flatten(); - let target_blockchain_height = - storage.get_header_height_by_hash(&diff.block_hash).await.ok().flatten(); - - // Determine if we're syncing from a checkpoint for height conversion - let is_ckpt = self.header_sync.is_synced_from_checkpoint(); - let sync_base = self.header_sync.get_sync_base_height(); - - tracing::info!( - "📥 Processing post-sync masternode diff for block {} at height {:?} (base: {} at height {:?})", - diff.block_hash, - target_blockchain_height, - diff.base_block_hash, - base_blockchain_height - ); - - // Process the diff through the masternode sync manager - // This will update the masternode engine's state - self.masternode_sync.handle_mnlistdiff_message(diff, storage, network).await?; - - // Log the current masternode state after update - if let Ok(Some(mn_state)) = storage.load_masternode_state().await { - // Convert masternode storage height to blockchain height - let mn_blockchain_height = if is_ckpt && sync_base > 0 { - sync_base + mn_state.last_height - } else { - mn_state.last_height - }; - - tracing::debug!( - "📊 Masternode state after update: last height = {}, can validate ChainLocks up to height {}", - mn_blockchain_height, - mn_blockchain_height + CHAINLOCK_VALIDATION_MASTERNODE_OFFSET - ); - } - - // After processing the diff, check if we have any pending ChainLocks that can now be validated - // TODO: Implement chain manager functionality for pending ChainLocks - // if let Ok(Some(chain_manager)) = storage.load_chain_manager().await { - // if chain_manager.has_pending_chainlocks() { - // tracing::info!( - // "🔒 Checking {} pending ChainLocks after masternode list update", - // chain_manager.pending_chainlocks_count() - // ); - // - // // The chain manager will handle validation of pending ChainLocks - // // when it receives the next ChainLock or during periodic validation - // } - // } - - Ok(()) - } -} diff --git a/dash-spv/src/sync/legacy/transitions.rs b/dash-spv/src/sync/legacy/transitions.rs deleted file mode 100644 index e8ce58e93..000000000 --- a/dash-spv/src/sync/legacy/transitions.rs +++ /dev/null @@ -1,443 +0,0 @@ -//! 
Phase transition logic for sequential sync - -use crate::client::ClientConfig; -use crate::error::{SyncError, SyncResult}; -use crate::network::NetworkManager; -use crate::storage::StorageManager; -use dashcore::network::constants::ServiceFlags; - -use super::phases::{PhaseTransition, SyncPhase}; -use std::time::Instant; - -/// Manages phase transitions and validation -pub struct TransitionManager { - config: ClientConfig, -} - -impl TransitionManager { - /// Create a new transition manager - pub fn new(config: &ClientConfig) -> Self { - Self { - config: config.clone(), - } - } - - /// Check if we can transition from current phase to target phase - pub async fn can_transition_to( - &self, - current_phase: &SyncPhase, - target_phase: &SyncPhase, - storage: &S, - ) -> SyncResult { - // Can't transition to the same phase - if std::mem::discriminant(current_phase) == std::mem::discriminant(target_phase) { - return Ok(false); - } - - // Check specific transition rules - match (current_phase, target_phase) { - // From Idle, can only go to DownloadingHeaders - ( - SyncPhase::Idle, - SyncPhase::DownloadingHeaders { - .. - }, - ) => Ok(true), - - // From DownloadingHeaders, check completion - ( - SyncPhase::DownloadingHeaders { - .. - }, - next_phase, - ) => { - // Headers must be complete - if !self.are_headers_complete(current_phase, storage).await? { - return Ok(false); - } - - // Can go to MnList if enabled, or skip to CFHeaders - match next_phase { - SyncPhase::DownloadingMnList { - .. - } => Ok(self.config.enable_masternodes), - SyncPhase::DownloadingCFHeaders { - .. - } => Ok(!self.config.enable_masternodes && self.config.enable_filters), - SyncPhase::FullySynced { - .. - } => Ok(!self.config.enable_masternodes && !self.config.enable_filters), - _ => Ok(false), - } - } - - // From DownloadingMnList - ( - SyncPhase::DownloadingMnList { - .. - }, - next_phase, - ) => { - // MnList must be complete - if !self.are_masternodes_complete(current_phase, storage).await? { - return Ok(false); - } - - match next_phase { - SyncPhase::DownloadingCFHeaders { - .. - } => Ok(self.config.enable_filters), - SyncPhase::FullySynced { - .. - } => Ok(!self.config.enable_filters), - _ => Ok(false), - } - } - - // From DownloadingCFHeaders - ( - SyncPhase::DownloadingCFHeaders { - .. - }, - next_phase, - ) => { - match next_phase { - SyncPhase::DownloadingFilters { - .. - } => { - // Normal case: download filters after cfheaders - // CFHeaders must be complete - Ok(self.are_cfheaders_complete(current_phase, storage).await?) - } - SyncPhase::FullySynced { - .. - } => { - // Allow skipping to FullySynced if no peers support filters - // Don't require cfheaders to be complete in this case - Ok(true) - } - _ => Ok(false), - } - } - - // From DownloadingFilters - ( - SyncPhase::DownloadingFilters { - .. - }, - next_phase, - ) => { - // Filters must be complete or no blocks needed - if !self.are_filters_complete(current_phase) { - return Ok(false); - } - - match next_phase { - SyncPhase::DownloadingBlocks { - .. - } => { - // Check if we have blocks to download - Ok(self.has_blocks_to_download(current_phase)) - } - SyncPhase::FullySynced { - .. - } => { - // Can go to synced if no blocks to download - Ok(!self.has_blocks_to_download(current_phase)) - } - _ => Ok(false), - } - } - - // From DownloadingBlocks - ( - SyncPhase::DownloadingBlocks { - .. - }, - SyncPhase::FullySynced { - .. 
- }, - ) => { - // All blocks must be downloaded - Ok(self.are_blocks_complete(current_phase)) - } - - // All other transitions are invalid - _ => Ok(false), - } - } - - /// Get the next phase based on current phase and configuration - pub async fn get_next_phase( - &self, - current_phase: &SyncPhase, - storage: &S, - network: &N, - ) -> SyncResult> { - match current_phase { - SyncPhase::Idle => { - // Always start with headers - let start_height = storage.get_tip_height().await.unwrap_or(0); - - Ok(Some(SyncPhase::DownloadingHeaders { - start_time: Instant::now(), - start_height, - current_height: start_height, - target_height: None, - last_progress: Instant::now(), - headers_downloaded: 0, - headers_per_second: 0.0, - received_empty_response: false, - })) - } - - SyncPhase::DownloadingHeaders { - .. - } => { - if self.config.enable_masternodes { - let header_tip = storage.get_tip_height().await.unwrap_or(0); - - let mn_height = match storage.load_masternode_state().await { - Ok(Some(state)) => state.last_height, - _ => 0, - }; - - Ok(Some(SyncPhase::DownloadingMnList { - start_time: Instant::now(), - start_height: mn_height, - current_height: mn_height, - target_height: header_tip, - last_progress: Instant::now(), - diffs_processed: 0, - requests_total: 0, - requests_completed: 0, - })) - } else if self.config.enable_filters { - self.create_cfheaders_phase(storage).await - } else { - self.create_fully_synced_phase(storage).await - } - } - - SyncPhase::DownloadingMnList { - .. - } => { - if self.config.enable_filters { - self.create_cfheaders_phase(storage).await - } else { - self.create_fully_synced_phase(storage).await - } - } - - SyncPhase::DownloadingCFHeaders { - .. - } => { - // Check if any peer supports compact filters - if !network.has_peer_with_service(ServiceFlags::COMPACT_FILTERS).await { - tracing::info!( - "No peers support compact filters, skipping filter download phase" - ); - // Skip directly to fully synced since we can't download filters - self.create_fully_synced_phase(storage).await - } else { - // After CFHeaders, we need to determine what filters to download - // For now, we'll create a filters phase that will be populated later - Ok(Some(SyncPhase::DownloadingFilters { - start_time: Instant::now(), - requested_ranges: std::collections::HashMap::new(), - completed_heights: std::collections::HashSet::new(), - total_filters: 0, // Will be determined based on watch items - last_progress: Instant::now(), - batches_processed: 0, - })) - } - } - - SyncPhase::DownloadingFilters { - .. - } => { - // Check if we have blocks to download - if self.has_blocks_to_download(current_phase) { - if let SyncPhase::DownloadingFilters { - .. - } = current_phase - { - Ok(Some(SyncPhase::DownloadingBlocks { - start_time: Instant::now(), - pending_blocks: Vec::new(), // Will be populated from filter matches - downloading: std::collections::HashMap::new(), - completed: Vec::new(), - last_progress: Instant::now(), - total_blocks: 0, // Will be set when we populate pending_blocks - })) - } else { - Ok(None) - } - } else { - self.create_fully_synced_phase(storage).await - } - } - - SyncPhase::DownloadingBlocks { - .. - } => self.create_fully_synced_phase(storage).await, - - SyncPhase::FullySynced { - .. 
- } => Ok(None), // Already synced - } - } - - /// Create a phase transition record - pub fn create_transition( - &self, - from_phase: &SyncPhase, - to_phase: &SyncPhase, - reason: String, - ) -> PhaseTransition { - PhaseTransition { - from_phase: from_phase.name().to_string(), - to_phase: to_phase.name().to_string(), - timestamp: Instant::now(), - reason, - final_progress: if from_phase.is_syncing() { - Some(from_phase.progress()) - } else { - None - }, - } - } - - // Helper methods for checking phase completion - - async fn are_headers_complete( - &self, - phase: &SyncPhase, - _storage: &S, - ) -> SyncResult { - if let SyncPhase::DownloadingHeaders { - received_empty_response, - .. - } = phase - { - // Headers are complete when we receive an empty response - Ok(*received_empty_response) - } else { - Ok(false) - } - } - - async fn are_masternodes_complete( - &self, - phase: &SyncPhase, - storage: &S, - ) -> SyncResult { - if let SyncPhase::DownloadingMnList { - current_height, - target_height, - .. - } = phase - { - // Check if we've reached the target - if current_height >= target_height { - return Ok(true); - } - - // Also check storage to be sure - if let Ok(Some(state)) = storage.load_masternode_state().await { - Ok(state.last_height >= *target_height) - } else { - Ok(false) - } - } else { - Ok(false) - } - } - - async fn are_cfheaders_complete( - &self, - phase: &SyncPhase, - _storage: &S, - ) -> SyncResult { - if let SyncPhase::DownloadingCFHeaders { - current_height, - target_height, - .. - } = phase - { - Ok(current_height >= target_height) - } else { - Ok(false) - } - } - - fn are_filters_complete(&self, phase: &SyncPhase) -> bool { - if let SyncPhase::DownloadingFilters { - completed_heights, - total_filters, - .. - } = phase - { - completed_heights.len() as u32 >= *total_filters - } else { - false - } - } - - fn are_blocks_complete(&self, phase: &SyncPhase) -> bool { - if let SyncPhase::DownloadingBlocks { - pending_blocks, - downloading, - .. - } = phase - { - pending_blocks.is_empty() && downloading.is_empty() - } else { - false - } - } - - fn has_blocks_to_download(&self, _phase: &SyncPhase) -> bool { - // This will be determined by filter matches - // For now, return false (no blocks to download) - false - } - - async fn create_cfheaders_phase( - &self, - storage: &S, - ) -> SyncResult> { - let header_tip = storage.get_tip_height().await.unwrap_or(0); - - let filter_tip = storage - .get_filter_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get filter tip: {}", e)))? - .unwrap_or(0); - - Ok(Some(SyncPhase::DownloadingCFHeaders { - start_time: Instant::now(), - start_height: filter_tip, - current_height: filter_tip, - target_height: header_tip, - last_progress: Instant::now(), - cfheaders_downloaded: 0, - cfheaders_per_second: 0.0, - })) - } - - async fn create_fully_synced_phase( - &self, - _storage: &S, - ) -> SyncResult> { - Ok(Some(SyncPhase::FullySynced { - sync_completed_at: Instant::now(), - total_sync_time: Duration::from_secs(0), // Will be calculated from phase history - headers_synced: 0, // Will be calculated from phase history - filters_synced: 0, // Will be calculated from phase history - blocks_downloaded: 0, // Will be calculated from phase history - })) - } -} - -use std::time::Duration; diff --git a/dash-spv/src/sync/mod.rs b/dash-spv/src/sync/mod.rs index 88defc285..d195e34a3 100644 --- a/dash-spv/src/sync/mod.rs +++ b/dash-spv/src/sync/mod.rs @@ -1,8 +1,5 @@ //! Synchronization management for the Dash SPV client. 
-// Legacy sync modules (moved to legacy/ subdirectory) -pub mod legacy; - mod block_headers; mod blocks; mod chainlock; diff --git a/dash-spv/src/terminal.rs b/dash-spv/src/terminal.rs deleted file mode 100644 index 1e26eabb8..000000000 --- a/dash-spv/src/terminal.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! Terminal UI utilities for displaying status information. - -use crossterm::{ - cursor, execute, - style::{Print, Stylize}, - terminal::{self, ClearType}, - QueueableCommand, -}; -use std::io::{self, Write}; -use std::sync::Arc; -use tokio::sync::RwLock; -use tokio::time::{interval, Duration}; - -/// Status information to display in the terminal -#[derive(Clone, Default)] -pub struct TerminalStatus { - pub headers: u32, - pub filter_headers: u32, - pub chainlock_height: Option, - pub peer_count: usize, - pub network: String, -} - -/// Terminal UI manager for displaying status -pub struct TerminalUI { - status: Arc>, - enabled: bool, -} - -impl TerminalUI { - /// Create a new terminal UI manager - pub fn new(enabled: bool) -> Self { - Self { - status: Arc::new(RwLock::new(TerminalStatus::default())), - enabled, - } - } - - /// Get a handle to update the status - pub fn status_handle(&self) -> Arc> { - self.status.clone() - } - - /// Initialize the terminal UI - pub fn init(&self) -> io::Result<()> { - if !self.enabled { - return Ok(()); - } - - // Don't clear screen or hide cursor - we want normal log output - // Just add some space for the status bar - println!(); // Add blank line before status bar - - Ok(()) - } - - /// Cleanup terminal UI - pub fn cleanup(&self) -> io::Result<()> { - if !self.enabled { - return Ok(()); - } - - // Restore terminal - execute!(io::stdout(), cursor::Show, cursor::MoveTo(0, terminal::size()?.1))?; - - println!(); // Add a newline after the status bar - - Ok(()) - } - - /// Draw just the status bar at the bottom - pub async fn draw(&self) -> io::Result<()> { - if !self.enabled { - return Ok(()); - } - - let status = self.status.read().await; - let (width, height) = terminal::size()?; - - // Lock stdout for the entire draw operation - let mut stdout = io::stdout(); - - // Save cursor position - stdout.queue(cursor::SavePosition)?; - - // Check if terminal is large enough - if height < 2 { - // Terminal too small to draw status bar - stdout.queue(cursor::RestorePosition)?; - return stdout.flush(); - } - - // Draw separator line - stdout.queue(cursor::MoveTo(0, height - 2))?; - stdout.queue(terminal::Clear(ClearType::CurrentLine))?; - stdout.queue(Print("─".repeat(width as usize).dark_grey()))?; - - // Draw status bar - stdout.queue(cursor::MoveTo(0, height - 1))?; - stdout.queue(terminal::Clear(ClearType::CurrentLine))?; - - // Format status bar - let status_text = format!( - " {} {} │ {} {} │ {} {} │ {} {} │ {} {}", - "Headers:".cyan().bold(), - format_number(status.headers).white(), - "Filters:".cyan().bold(), - format_number(status.filter_headers).white(), - "ChainLock:".cyan().bold(), - status - .chainlock_height - .map(|h| format!("#{}", format_number(h))) - .unwrap_or_else(|| "None".to_string()) - .yellow(), - "Peers:".cyan().bold(), - status.peer_count.to_string().white(), - "Network:".cyan().bold(), - status.network.clone().green() - ); - - stdout.queue(Print(&status_text))?; - - // Add padding to fill the rest of the line - let status_len = strip_ansi_codes(&status_text).len(); - if status_len < width as usize { - stdout.queue(Print(" ".repeat(width as usize - status_len)))?; - } - - // Restore cursor position - stdout.queue(cursor::RestorePosition)?; - 
- stdout.flush()?; - - Ok(()) - } - - /// Update status and redraw - pub async fn update_status(&self, updater: F) -> io::Result<()> - where - F: FnOnce(&mut TerminalStatus), - { - { - let mut status = self.status.write().await; - updater(&mut status); - } - self.draw().await - } - - /// Start the UI update loop - pub fn start_update_loop(self: Arc) { - if !self.enabled { - return; - } - - tokio::spawn(async move { - let mut interval = interval(Duration::from_millis(100)); // Update 10 times per second - - loop { - interval.tick().await; - if let Err(e) = self.draw().await { - eprintln!("Terminal UI error: {}", e); - break; - } - } - }); - } -} - -/// Format a number with thousand separators -fn format_number(n: u32) -> String { - let s = n.to_string(); - let mut result = String::new(); - - for (count, ch) in s.chars().rev().enumerate() { - if count > 0 && count % 3 == 0 { - result.push(','); - } - result.push(ch); - } - - result.chars().rev().collect() -} - -/// Strip ANSI color codes for length calculation -fn strip_ansi_codes(s: &str) -> String { - // Simple implementation - in production you'd use a proper ANSI stripping library - let mut result = String::new(); - let mut in_escape = false; - - for ch in s.chars() { - if ch == '\x1b' { - in_escape = true; - } else if in_escape && ch == 'm' { - in_escape = false; - } else if !in_escape { - result.push(ch); - } - } - - result -} - -/// RAII guard for terminal UI cleanup -pub struct TerminalGuard { - ui: Arc, -} - -impl TerminalGuard { - pub fn new(ui: Arc) -> io::Result { - ui.init()?; - ui.clone().start_update_loop(); - Ok(Self { - ui, - }) - } -} - -impl Drop for TerminalGuard { - fn drop(&mut self) { - let _ = self.ui.cleanup(); - } -} diff --git a/dash-spv/src/test_utils/network.rs b/dash-spv/src/test_utils/network.rs index dbce759c6..29075949f 100644 --- a/dash-spv/src/test_utils/network.rs +++ b/dash-spv/src/test_utils/network.rs @@ -62,7 +62,7 @@ impl MockNetworkManager { let mut headers = Vec::new(); let mut prev_hash = genesis_hash; - // Skip genesis (height 0) as it's already in ChainState + // Skip genesis (height 0) as it's already in the storage for i in 1..count { let header = BlockHeader { version: dashcore::block::Version::from_consensus(1), diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index b86ef227c..87b89dd1f 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -1,41 +1,14 @@ //! Common type definitions for the Dash SPV client. -//! -//! # Architecture Note -//! This file has grown to 1,065 lines and should be split into: -//! - types/chain.rs - ChainState, CachedHeader -//! - types/sync.rs - SyncProgress, SyncStage -//! - types/events.rs - SpvEvent, MempoolRemovalReason -//! - types/balances.rs - AddressBalance, UnconfirmedTransaction -//! -//! # Thread Safety -//! Many types here are wrapped in `Arc` or `Arc` when used. -//! Always acquire locks in consistent order to prevent deadlocks: -//! 1. state (ChainState) -//! 2. mempool_state (MempoolState) - -use std::time::{Duration, Instant, SystemTime}; + +use std::time::{Duration, Instant}; use dashcore::{ block::Header as BlockHeader, consensus::{Decodable, Encodable}, - hash_types::FilterHeader, - network::constants::NetworkExt, - sml::masternode_list_engine::MasternodeListEngine, - Amount, Block, BlockHash, ChainLock, Network, Transaction, Txid, + Amount, Block, BlockHash, Transaction, Txid, }; use serde::{Deserialize, Serialize}; -/// Shared, mutex-protected set of filter heights used across components. -/// -/// # Why `Arc>`? 
-/// - Arc: Shared ownership between FilterSyncManager and SpvStats -/// - Mutex: Interior mutability for concurrent updates from filter download tasks -/// - HashSet: Fast O(1) membership testing for gap detection -/// -/// # Performance Note -/// Consider `Arc` if read contention becomes an issue (most operations are reads). -pub type SharedFilterHeights = std::sync::Arc>>; - /// A block header with its cached hash to avoid expensive X11 recomputation. /// /// During header sync, each header's hash is computed multiple times: @@ -184,273 +157,6 @@ impl std::fmt::Display for PeerId { } } -/// Sync progress information. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SyncProgress { - /// Current height of synchronized headers. - pub header_height: u32, - - /// Current height of synchronized filter headers. - pub filter_header_height: u32, - - /// Current height of synchronized masternode list. - pub masternode_height: u32, - - /// Total number of peers connected. - pub peer_count: u32, - - /// Whether filter sync is available (peers support it). - pub filter_sync_available: bool, - - /// Number of compact filters downloaded. - pub filters_downloaded: u64, - - /// Last height where filters were synced/verified. - pub last_synced_filter_height: Option, - - /// Sync start time. - pub sync_start: SystemTime, - - /// Last update time. - pub last_update: SystemTime, -} - -impl Default for SyncProgress { - fn default() -> Self { - let now = SystemTime::now(); - Self { - header_height: 0, - filter_header_height: 0, - masternode_height: 0, - peer_count: 0, - filter_sync_available: false, - filters_downloaded: 0, - last_synced_filter_height: None, - sync_start: now, - last_update: now, - } - } -} - -/// Detailed sync progress with performance metrics. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DetailedSyncProgress { - /// Snapshot of the core sync metrics for quick consumption. - pub sync_progress: SyncProgress, - pub peer_best_height: u32, - pub percentage: f64, - - /// Performance metrics - pub headers_per_second: f64, - pub bytes_per_second: u64, - pub estimated_time_remaining: Option, - - /// Detailed status - pub sync_stage: SyncStage, - pub total_headers_processed: u64, - pub total_bytes_downloaded: u64, - - /// Timing - pub sync_start_time: SystemTime, - pub last_update_time: SystemTime, -} - -/// Sync stage for detailed progress tracking. 
-
-/// Sync stage for detailed progress tracking.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub enum SyncStage {
-    Connecting,
-    QueryingPeerHeight,
-    DownloadingHeaders {
-        start: u32,
-        end: u32,
-    },
-    ValidatingHeaders {
-        batch_size: usize,
-    },
-    StoringHeaders {
-        batch_size: usize,
-    },
-    DownloadingFilterHeaders {
-        current: u32,
-        target: u32,
-    },
-    DownloadingFilters {
-        completed: u32,
-        total: u32,
-    },
-    DownloadingBlocks {
-        pending: usize,
-    },
-    Complete,
-    Failed(String),
-}
-
-impl DetailedSyncProgress {
-    pub fn calculate_percentage(&self) -> f64 {
-        if self.peer_best_height == 0 {
-            return 0.0;
-        }
-        let current_height = self.sync_progress.header_height;
-        ((current_height as f64 / self.peer_best_height as f64) * 100.0).min(100.0)
-    }
-
-    pub fn calculate_eta(&self) -> Option<Duration> {
-        if self.headers_per_second <= 0.0 {
-            return None;
-        }
-
-        let current_height = self.sync_progress.header_height;
-        let remaining = self.peer_best_height.saturating_sub(current_height);
-        if remaining == 0 {
-            return Some(Duration::from_secs(0));
-        }
-
-        let seconds = remaining as f64 / self.headers_per_second;
-        Some(Duration::from_secs_f64(seconds))
-    }
-}
-
-/// Chain state maintained by the SPV client.
-///
-/// # CRITICAL: This is the heart of the SPV client's state
-///
-/// ## Thread Safety
-/// Almost always wrapped in `Arc<RwLock<ChainState>>` for shared access.
-/// Multiple readers can access simultaneously, but writes are exclusive.
-///
-/// ## Checkpoint Sync
-/// When syncing from a checkpoint (not genesis), `sync_base_height` is non-zero.
-#[derive(Clone, Default)]
-pub struct ChainState {
-    /// Last ChainLock height.
-    pub last_chainlock_height: Option<u32>,
-
-    /// Last ChainLock hash.
-    pub last_chainlock_hash: Option<BlockHash>,
-
-    /// Current filter tip.
-    pub current_filter_tip: Option<FilterHeader>,
-
-    /// Masternode list engine.
-    pub masternode_engine: Option<MasternodeListEngine>,
-
-    /// Last masternode diff height processed.
-    pub last_masternode_diff_height: Option<u32>,
-
-    /// Base height when syncing from a checkpoint (0 if syncing from genesis).
-    pub sync_base_height: u32,
-}
-
-impl ChainState {
-    /// Create a new empty chain state
-    pub fn new() -> Self {
-        Self::default()
-    }
-
-    /// Create a new chain state for the given network.
-    pub fn new_for_network(network: Network) -> Self {
-        let mut state = Self::default();
-
-        // Initialize masternode engine for the network
-        let mut engine = MasternodeListEngine::default_for_network(network);
-        if let Some(genesis_hash) = network.known_genesis_block_hash() {
-            engine.feed_block_height(0, genesis_hash);
-        }
-        state.masternode_engine = Some(engine);
-
-        // Initialize checkpoint fields
-        state.sync_base_height = 0;
-
-        state
-    }
-
-    /// Whether the chain was synced from a checkpoint rather than genesis.
-    pub fn synced_from_checkpoint(&self) -> bool {
-        self.sync_base_height > 0
-    }
-
-    /// Update chain lock status
-    pub fn update_chain_lock(&mut self, height: u32, hash: BlockHash) {
-        // Only update if this is a newer chain lock
-        if self.last_chainlock_height.is_none_or(|h| height > h) {
-            self.last_chainlock_height = Some(height);
-            self.last_chainlock_hash = Some(hash);
-        }
-    }
-
-    /// Check if a block at given height is chain-locked
-    pub fn is_height_chain_locked(&self, height: u32) -> bool {
-        self.last_chainlock_height.is_some_and(|locked_height| height <= locked_height)
-    }
-
-    /// Check if we have a chain lock
-    pub fn has_chain_lock(&self) -> bool {
-        self.last_chainlock_height.is_some()
-    }
-
-    /// Get the last chain-locked height
-    pub fn get_last_chainlock_height(&self) -> Option<u32> {
-        self.last_chainlock_height
-    }
-
-    /// Get filter matched heights (placeholder for now)
-    /// In a real implementation, this would track heights where filters matched wallet transactions
-    pub fn get_filter_matched_heights(&self) -> Option<Vec<u32>> {
-        // For now, return an empty vector as we don't track this yet
-        // This would typically be populated during filter sync when matches are found
-        Some(Vec::new())
-    }
-
-    /// Initialize chain state from a checkpoint.
-    pub fn init_from_checkpoint(
-        &mut self,
-        checkpoint_height: u32,
-        checkpoint_header: BlockHeader,
-        network: Network,
-    ) {
-        // Set sync base height to checkpoint
-        self.sync_base_height = checkpoint_height;
-
-        tracing::info!(
-            "Initialized ChainState from checkpoint - height: {}, hash: {}, network: {:?}",
-            checkpoint_height,
-            checkpoint_header.block_hash(),
-            network
-        );
-
-        // Initialize masternode engine for the network, starting from checkpoint
-        let mut engine = MasternodeListEngine::default_for_network(network);
-        engine.feed_block_height(checkpoint_height, checkpoint_header.block_hash());
-        self.masternode_engine = Some(engine);
-    }
-
-    /// Get the absolute height for a given index in our headers vector.
-    pub fn index_to_height(&self, index: usize) -> u32 {
-        self.sync_base_height + index as u32
-    }
-
-    /// Get the index in our headers vector for a given absolute height.
-    pub fn height_to_index(&self, height: u32) -> Option<usize> {
-        if height < self.sync_base_height {
-            None
-        } else {
-            Some((height - self.sync_base_height) as usize)
-        }
-    }
-}
-
-impl std::fmt::Debug for ChainState {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("ChainState")
-            .field("last_chainlock_height", &self.last_chainlock_height)
-            .field("last_chainlock_hash", &self.last_chainlock_hash)
-            .field("current_filter_tip", &self.current_filter_tip)
-            .field("last_masternode_diff_height", &self.last_masternode_diff_height)
-            .field("sync_base_height", &self.sync_base_height)
-            .finish()
-    }
-}
-
 /// Validation mode for the SPV client.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
 pub enum ValidationMode {
@@ -478,36 +184,6 @@ pub struct FilterMatch {
     pub block_requested: bool,
 }
 
-// WatchItem has been removed in favor of using key-wallet-manager's address tracking
-
-/// Balance information for an address.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct AddressBalance {
-    /// Confirmed balance (6+ confirmations or InstantLocked).
-    pub confirmed: dashcore::Amount,
-
-    /// Unconfirmed balance (less than 6 confirmations).
-    pub unconfirmed: dashcore::Amount,
-
-    /// Pending balance from mempool transactions (not InstantLocked).
-    pub pending: dashcore::Amount,
-
-    /// Pending balance from InstantLocked mempool transactions.
-    pub pending_instant: dashcore::Amount,
-}
-
-impl AddressBalance {
-    /// Get the total balance (confirmed + unconfirmed + pending).
-    pub fn total(&self) -> dashcore::Amount {
-        self.confirmed + self.unconfirmed + self.pending + self.pending_instant
-    }
-
-    /// Get the available balance (confirmed + pending_instant).
-    pub fn available(&self) -> dashcore::Amount {
-        self.confirmed + self.pending_instant
-    }
-}
-
 /// Mempool balance information.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct MempoolBalance {
@@ -518,210 +194,6 @@ pub struct MempoolBalance {
     pub pending_instant: dashcore::Amount,
 }
 
-// Custom serialization for AddressBalance to handle Amount serialization
-impl Serialize for AddressBalance {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        use serde::ser::SerializeStruct;
-
-        let mut state = serializer.serialize_struct("AddressBalance", 4)?;
-        state.serialize_field("confirmed", &self.confirmed.to_sat())?;
-        state.serialize_field("unconfirmed", &self.unconfirmed.to_sat())?;
-        state.serialize_field("pending", &self.pending.to_sat())?;
-        state.serialize_field("pending_instant", &self.pending_instant.to_sat())?;
-        state.end()
-    }
-}
-
-impl<'de> Deserialize<'de> for AddressBalance {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        use serde::de::{MapAccess, Visitor};
-        use std::fmt;
-
-        struct AddressBalanceVisitor;
-
-        impl<'de> Visitor<'de> for AddressBalanceVisitor {
-            type Value = AddressBalance;
-
-            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
-                formatter.write_str("an AddressBalance struct")
-            }
-
-            fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
-            where
-                M: MapAccess<'de>,
-            {
-                let mut confirmed: Option<u64> = None;
-                let mut unconfirmed: Option<u64> = None;
-                let mut pending: Option<u64> = None;
-                let mut pending_instant: Option<u64> = None;
-
-                while let Some(key) = map.next_key::<String>()? {
-                    match key.as_str() {
-                        "confirmed" => {
-                            if confirmed.is_some() {
-                                return Err(serde::de::Error::duplicate_field("confirmed"));
-                            }
-                            confirmed = Some(map.next_value()?);
-                        }
-                        "unconfirmed" => {
-                            if unconfirmed.is_some() {
-                                return Err(serde::de::Error::duplicate_field("unconfirmed"));
-                            }
-                            unconfirmed = Some(map.next_value()?);
-                        }
-                        "pending" => {
-                            if pending.is_some() {
-                                return Err(serde::de::Error::duplicate_field("pending"));
-                            }
-                            pending = Some(map.next_value()?);
-                        }
-                        "pending_instant" => {
-                            if pending_instant.is_some() {
-                                return Err(serde::de::Error::duplicate_field("pending_instant"));
-                            }
-                            pending_instant = Some(map.next_value()?);
-                        }
-                        _ => {
-                            let _: serde::de::IgnoredAny = map.next_value()?;
-                        }
-                    }
-                }
-
-                let confirmed =
-                    confirmed.ok_or_else(|| serde::de::Error::missing_field("confirmed"))?;
-                let unconfirmed =
-                    unconfirmed.ok_or_else(|| serde::de::Error::missing_field("unconfirmed"))?;
-                // Default to 0 for backwards compatibility
-                let pending = pending.unwrap_or(0);
-                let pending_instant = pending_instant.unwrap_or(0);
-
-                Ok(AddressBalance {
-                    confirmed: dashcore::Amount::from_sat(confirmed),
-                    unconfirmed: dashcore::Amount::from_sat(unconfirmed),
-                    pending: dashcore::Amount::from_sat(pending),
-                    pending_instant: dashcore::Amount::from_sat(pending_instant),
-                })
-            }
-        }
-
-        deserializer.deserialize_struct(
-            "AddressBalance",
-            &["confirmed", "unconfirmed", "pending", "pending_instant"],
-            AddressBalanceVisitor,
-        )
-    }
-}
-
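Because the visitor falls back to `unwrap_or(0)` for the two newer fields, payloads written before `pending`/`pending_instant` existed still deserialize. A hedged sketch of that behaviour, assuming `serde_json` as the format and the `AddressBalance` impls above in scope:

```rust
// Hypothetical round-trip check: a legacy payload that predates the
// `pending`/`pending_instant` fields still deserializes, with both
// missing fields defaulting to 0 satoshis.
fn decode_legacy() -> Result<(), serde_json::Error> {
    let legacy = r#"{"confirmed":150000,"unconfirmed":2500}"#;
    let balance: AddressBalance = serde_json::from_str(legacy)?;
    assert_eq!(balance.confirmed, dashcore::Amount::from_sat(150_000));
    assert_eq!(balance.pending, dashcore::Amount::from_sat(0));
    assert_eq!(balance.pending_instant, dashcore::Amount::from_sat(0));
    Ok(())
}
```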
-/// Events emitted by the SPV client.
-#[derive(Debug, Clone)]
-pub enum SpvEvent {
-    /// Balance has been updated.
-    BalanceUpdate {
-        /// Confirmed balance in satoshis.
-        confirmed: u64,
-        /// Unconfirmed balance in satoshis.
-        unconfirmed: u64,
-        /// Total balance in satoshis.
-        total: u64,
-    },
-
-    /// New transaction detected.
-    TransactionDetected {
-        /// Transaction ID.
-        txid: String,
-        /// Whether the transaction is confirmed.
-        confirmed: bool,
-        /// Block height if confirmed.
-        block_height: Option<u32>,
-        /// Net amount change (positive for received, negative for sent).
-        amount: i64,
-        /// Addresses affected by this transaction.
-        addresses: Vec<String>,
-    },
-
-    /// Block processed.
-    BlockProcessed {
-        /// Block height.
-        height: u32,
-        /// Block hash.
-        hash: String,
-        /// Total number of transactions in the block.
-        transactions_count: usize,
-        /// Number of relevant transactions.
-        relevant_transactions: usize,
-    },
-
-    /// Sync progress update.
-    SyncProgress {
-        /// Current block height.
-        current_height: u32,
-        /// Target block height.
-        target_height: u32,
-        /// Progress percentage.
-        percentage: f64,
-    },
-
-    /// ChainLock received and validated.
-    ChainLockReceived {
-        /// The complete ChainLock data.
-        chain_lock: ChainLock,
-        /// Whether the BLS signature was validated.
-        validated: bool,
-    },
-
-    /// InstantLock received and validated.
-    InstantLockReceived {
-        /// The complete InstantLock data.
-        instant_lock: dashcore::ephemerealdata::instant_lock::InstantLock,
-        /// Whether the BLS signature was validated.
-        validated: bool,
-    },
-
-    /// Unconfirmed transaction added to mempool.
-    MempoolTransactionAdded {
-        /// Transaction ID.
-        txid: Txid,
-        /// Raw transaction data.
-        transaction: Box<Transaction>,
-        /// Net amount change (positive for received, negative for sent).
-        amount: i64,
-        /// Addresses affected by this transaction.
-        addresses: Vec<String>,
-        /// Whether this is an InstantSend transaction.
-        is_instant_send: bool,
-    },
-
-    /// Transaction confirmed (moved from mempool to block).
-    MempoolTransactionConfirmed {
-        /// Transaction ID.
-        txid: Txid,
-        /// Block height where confirmed.
-        block_height: u32,
-        /// Block hash where confirmed.
-        block_hash: BlockHash,
-    },
-
-    /// Transaction removed from mempool (expired, replaced, or double-spent).
-    MempoolTransactionRemoved {
-        /// Transaction ID.
-        txid: Txid,
-        /// Reason for removal.
-        reason: MempoolRemovalReason,
-    },
-
-    /// Compact filter matched for a block.
-    CompactFilterMatched {
-        /// Block hash that matched.
-        hash: String,
-    },
-}
-
 /// Reason for removing a transaction from mempool.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum MempoolRemovalReason {
diff --git a/dash-spv/tests/block_download_test.rs b/dash-spv/tests/block_download_test.rs
deleted file mode 100644
index eb0b20f46..000000000
--- a/dash-spv/tests/block_download_test.rs
+++ /dev/null
@@ -1,187 +0,0 @@
-//! Tests for block downloading on filter match functionality.
- -use dash_spv::test_utils::MockNetworkManager; -use std::collections::HashSet; -use std::sync::Arc; -use tempfile::TempDir; -use tokio::sync::Mutex; - -use dashcore::block::Block; - -use dash_spv::{ - client::ClientConfig, storage::DiskStorageManager, sync::legacy::FilterSyncManager, - types::FilterMatch, -}; - -fn create_test_config() -> ClientConfig { - ClientConfig::testnet() - .without_masternodes() - .with_validation_mode(dash_spv::types::ValidationMode::None) - .with_storage_path(TempDir::new().unwrap().path()) -} - -#[tokio::test] -async fn test_filter_sync_manager_creation() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - assert!(!filter_sync.has_pending_downloads()); - assert_eq!(filter_sync.pending_download_count(), 0); -} - -#[tokio::test] -async fn test_request_block_download() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - let filter_match = FilterMatch::dummy(100); - - // Request block download - let result = filter_sync.request_block_download(filter_match.clone(), &mut network).await; - assert!(result.is_ok()); - - // Check sync manager state - assert!(filter_sync.has_pending_downloads()); - assert_eq!(filter_sync.pending_download_count(), 1); -} - -#[tokio::test] -async fn test_duplicate_block_request_prevention() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - let filter_match = FilterMatch::dummy(100); - - // Request block download twice - filter_sync.request_block_download(filter_match.clone(), &mut network).await.unwrap(); - filter_sync.request_block_download(filter_match.clone(), &mut network).await.unwrap(); - - // Should only track one download - assert_eq!(filter_sync.pending_download_count(), 1); -} - -#[tokio::test] -async fn test_handle_downloaded_block() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - let block = Block::dummy(100, vec![]); - let block_hash = block.block_hash(); - let filter_match = FilterMatch::dummy(100); - - // Request the block - filter_sync.request_block_download(filter_match.clone(), &mut network).await.unwrap(); - - // Handle the downloaded block - let result = filter_sync.handle_downloaded_block(&block).await.unwrap(); - - // Should return the matched filter - assert!(result.is_some()); - let returned_match = result.unwrap(); - assert_eq!(returned_match.block_hash, block_hash); - assert_eq!(returned_match.height, 100); - assert!(returned_match.block_requested); - - // Should no longer have pending downloads - assert!(!filter_sync.has_pending_downloads()); - assert_eq!(filter_sync.pending_download_count(), 0); -} - -#[tokio::test] -async fn test_handle_unexpected_block() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - let block = Block::dummy(0, 
vec![]); - - // Handle a block that wasn't requested - let result = filter_sync.handle_downloaded_block(&block).await.unwrap(); - - // Should return None for unexpected block - assert!(result.is_none()); -} - -#[tokio::test] -async fn test_process_multiple_filter_matches() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - let filter_matches = - vec![FilterMatch::dummy(100), FilterMatch::dummy(101), FilterMatch::dummy(102)]; - - // Process filter matches and request downloads - let result = - filter_sync.process_filter_matches_and_download(filter_matches, &mut network).await; - assert!(result.is_ok()); - - // Should track 3 pending downloads - assert_eq!(filter_sync.pending_download_count(), 3); -} - -#[tokio::test] -async fn test_sync_manager_integration() {} - -#[tokio::test] -async fn test_filter_match_and_download_workflow() { - let config = create_test_config(); - let _storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - // Create test address (WatchItem replaced with wallet-based tracking) - // let address = create_test_address(); - - // This is a simplified test - in real usage, we'd need to: - // 1. Store filter headers and filters - // 2. Check filters for matches - // 3. Request block downloads for matches - // 4. Handle downloaded blocks - // 5. Extract wallet transactions from blocks - - // For now, just test that we can create filter matches and request downloads - let filter_matches = vec![FilterMatch::dummy(100)]; - - let result = - filter_sync.process_filter_matches_and_download(filter_matches, &mut network).await; - assert!(result.is_ok()); - - assert!(filter_sync.has_pending_downloads()); -} - -#[tokio::test] -async fn test_reset_clears_download_state() { - let config = create_test_config(); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - let mut network = MockNetworkManager::new(); - - let filter_match = FilterMatch::dummy(100); - - // Request block download - filter_sync.request_block_download(filter_match, &mut network).await.unwrap(); - assert!(filter_sync.has_pending_downloads()); - - // Reset should clear all state - filter_sync.reset(); - assert!(!filter_sync.has_pending_downloads()); - assert_eq!(filter_sync.pending_download_count(), 0); -} diff --git a/dash-spv/tests/edge_case_filter_sync_test.rs b/dash-spv/tests/edge_case_filter_sync_test.rs deleted file mode 100644 index ad32ebcde..000000000 --- a/dash-spv/tests/edge_case_filter_sync_test.rs +++ /dev/null @@ -1,96 +0,0 @@ -//! Tests for edge case handling in filter header sync, particularly at the tip. 
- -use dash_spv::test_utils::MockNetworkManager; -use dash_spv::{ - client::ClientConfig, - storage::{BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage}, - sync::legacy::filters::FilterSyncManager, -}; -use dashcore::{ - block::Header as BlockHeader, hash_types::FilterHeader, network::message::NetworkMessage, - Network, -}; -use std::collections::HashSet; -use std::sync::Arc; -use tempfile::TempDir; -use tokio::sync::Mutex; - -#[tokio::test] -async fn test_filter_sync_at_tip_edge_case() { - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync = FilterSyncManager::new(&config, received_heights); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - // Set up storage with headers and filter headers at the same height (tip) - const TIP_HEIGHT: u32 = 100; - let headers = BlockHeader::dummy_batch(0..TIP_HEIGHT + 1); - let filter_headers = FilterHeader::dummy_batch(0..TIP_HEIGHT + 1); - - storage.store_headers(&headers).await.unwrap(); - storage.store_filter_headers(&filter_headers).await.unwrap(); - - // Verify initial state - let tip_height = storage.get_tip_height().await.unwrap(); - let filter_tip_height = storage.get_filter_tip_height().await.unwrap().unwrap(); - assert_eq!(tip_height, TIP_HEIGHT); // 0-indexed - assert_eq!(filter_tip_height, TIP_HEIGHT); // 0-indexed - - // Try to start filter sync when already at tip - let result = filter_sync.start_sync_headers(&mut network, &mut storage).await; - assert!(result.is_ok()); - assert!(!result.unwrap(), "Should not start sync when already at tip"); - - // Verify no messages were sent - let sent_messages = network.sent_messages(); - assert_eq!(sent_messages.len(), 0, "Should not send any messages when at tip"); -} - -#[tokio::test] -async fn test_no_invalid_getcfheaders_at_tip() { - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync = FilterSyncManager::new(&config, received_heights); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - // Create a scenario where we're one filter header behind - // FilterHeader at TIP_HEIGHT is the one missing - const TIP_HEIGHT: u32 = 99; - let headers = BlockHeader::dummy_batch(0..TIP_HEIGHT + 1); - let filter_headers = FilterHeader::dummy_batch(0..TIP_HEIGHT); - - storage.store_headers(&headers).await.unwrap(); - storage.store_filter_headers(&filter_headers).await.unwrap(); - - // Start filter sync - let result = filter_sync.start_sync_headers(&mut network, &mut storage).await; - assert!(result.is_ok()); - assert!(result.unwrap(), "Should start sync when behind by 1 block"); - - // Check the sent message - let sent_messages = network.sent_messages(); - assert_eq!(sent_messages.len(), 1, "Should send exactly one message"); - - match &sent_messages[0] { - NetworkMessage::GetCFHeaders(get_cf_headers) => { - // The critical check: start_height must be <= height of stop_hash - assert_eq!( - get_cf_headers.start_height, TIP_HEIGHT, - "Start height should be {}", - TIP_HEIGHT - ); - // We can't easily verify the stop_hash height here, but the request should be valid - println!( - "GetCFHeaders request: start_height={}, stop_hash={}", - 
get_cf_headers.start_height, get_cf_headers.stop_hash - ); - } - _ => panic!("Expected GetCFHeaders message"), - } -} diff --git a/dash-spv/tests/filter_header_verification_test.rs b/dash-spv/tests/filter_header_verification_test.rs deleted file mode 100644 index 7b4ed6312..000000000 --- a/dash-spv/tests/filter_header_verification_test.rs +++ /dev/null @@ -1,591 +0,0 @@ -//! Test to replicate the filter header chain verification failure observed in production. -//! -//! This test reproduces the exact scenario from the logs where: -//! 1. A batch of 1999 filter headers from height 616001-617999 is processed successfully -//! 2. The next batch starting at height 618000 fails verification because the -//! previous_filter_header doesn't match what we calculated and stored -//! -//! The failure indicates a race condition or inconsistency in how filter headers -//! are calculated, stored, or verified across multiple batches. - -use dash_spv::test_utils::MockNetworkManager; -use dash_spv::{ - client::ClientConfig, - error::SyncError, - storage::{BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage}, - sync::legacy::filters::FilterSyncManager, -}; -use dashcore::{ - block::Header as BlockHeader, - hash_types::{FilterHash, FilterHeader}, - network::message_filter::CFHeaders, - BlockHash, Network, -}; -use dashcore_hashes::{sha256d, Hash}; -use std::collections::HashSet; -use std::sync::Arc; -use tempfile::TempDir; -use tokio::sync::Mutex; - -/// Create test filter headers with proper chain linkage -fn create_test_cfheaders_message( - start_height: u32, - count: u32, - previous_filter_header: FilterHeader, - block_hashes: &[BlockHash], -) -> CFHeaders { - // Create fake filter hashes - let mut filter_hashes = Vec::new(); - for i in 0..count { - let height = start_height + i; - let hash_bytes = [(height % 256) as u8; 32]; - let sha256d_hash = sha256d::Hash::from_byte_array(hash_bytes); - let filter_hash = FilterHash::from_raw_hash(sha256d_hash); - filter_hashes.push(filter_hash); - } - - // Use the last block hash as stop_hash - let stop_hash = block_hashes.last().copied().unwrap_or(BlockHash::all_zeros()); - - CFHeaders { - filter_type: 0, - stop_hash, - previous_filter_header, - filter_hashes, - } -} - -/// Calculate what the filter header should be for a given height -fn calculate_expected_filter_header( - filter_hash: FilterHash, - prev_filter_header: FilterHeader, -) -> FilterHeader { - let mut data = [0u8; 64]; - data[..32].copy_from_slice(filter_hash.as_byte_array()); - data[32..].copy_from_slice(prev_filter_header.as_byte_array()); - FilterHeader::from_byte_array(sha256d::Hash::hash(&data).to_byte_array()) -} - -#[ignore = "mock implementation incomplete"] -#[tokio::test] -async fn test_filter_header_verification_failure_reproduction() { - let _ = env_logger::try_init(); - - println!("=== Testing Filter Header Chain Verification Failure ==="); - - // Create storage and sync manager - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - let config = ClientConfig::new(Network::Dash); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - // Step 1: Store initial headers to simulate having a synced header chain - println!("Step 1: Setting up initial header chain..."); - let initial_headers 
= BlockHeader::dummy_batch(1000..5000); // Headers 1000-4999 - storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - - let tip_height = storage.get_tip_height().await.unwrap(); - println!("Initial header chain stored: tip height = {}", tip_height); - assert_eq!(tip_height, 4999); - - // Step 2: Start filter sync first (required for message processing) - println!("\nStep 2: Starting filter header sync..."); - filter_sync.start_sync_headers(&mut network, &mut storage).await.expect("Failed to start sync"); - - // Step 3: Process first batch of filter headers successfully (1-1999, 1999 headers) - println!("\nStep 3: Processing first batch of filter headers (1-1999)..."); - - let first_batch_start = 1; - let first_batch_count = 1999; - let first_batch_end = first_batch_start + first_batch_count - 1; // 1999 - - // Create block hashes for the first batch - let mut first_batch_block_hashes = Vec::new(); - for height in first_batch_start..=first_batch_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - first_batch_block_hashes.push(header.block_hash()); - } - - // Use a known previous filter header (simulating genesis or previous sync) - let mut initial_prev_bytes = [0u8; 32]; - initial_prev_bytes[0] = 0x57; - initial_prev_bytes[1] = 0x1c; - initial_prev_bytes[2] = 0x4e; - let initial_prev_filter_header = FilterHeader::from_byte_array(initial_prev_bytes); - - let first_cfheaders = create_test_cfheaders_message( - first_batch_start, - first_batch_count, - initial_prev_filter_header, - &first_batch_block_hashes, - ); - - // Process first batch - this should succeed - let result = filter_sync - .handle_cfheaders_message(first_cfheaders.clone(), &mut storage, &mut network) - .await; - - match result { - Ok(continuing) => { - println!("First batch processed successfully, continuing: {}", continuing) - } - Err(e) => panic!("First batch should have succeeded, but failed: {:?}", e), - } - - // Verify first batch was stored correctly - let filter_tip = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!("Filter tip after first batch: {}", filter_tip); - assert_eq!(filter_tip, first_batch_end); - - // Get the last filter header from the first batch to see what we calculated - let last_stored_filter_header = storage - .get_filter_header(first_batch_end) - .await - .unwrap() - .expect("Last filter header should exist"); - - println!("Last stored filter header from first batch: {:?}", last_stored_filter_header); - - // Step 3: Calculate what the filter header should be for the last height - // This simulates what we actually calculated and stored - let last_filter_hash = first_cfheaders.filter_hashes.last().unwrap(); - let second_to_last_height = first_batch_end - 1; - let second_to_last_stored = storage - .get_filter_header(second_to_last_height) - .await - .unwrap() - .expect("Second to last filter header should exist"); - - let calculated_last_header = - calculate_expected_filter_header(*last_filter_hash, second_to_last_stored); - println!("Our calculated last header: {:?}", calculated_last_header); - println!("Actually stored last header: {:?}", last_stored_filter_header); - - // They should match - assert_eq!(calculated_last_header, last_stored_filter_header); - - // Step 4: Now create the second batch that will fail (2000-2999, 1000 headers) - println!("\nStep 4: Creating second batch that should fail (2000-2999)..."); - - let second_batch_start = 2000; - let second_batch_count = 1000; - let second_batch_end = second_batch_start 
+ second_batch_count - 1; // 2999 - - // Create block hashes for the second batch - let mut second_batch_block_hashes = Vec::new(); - for height in second_batch_start..=second_batch_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - second_batch_block_hashes.push(header.block_hash()); - } - - // Here's the key: use a DIFFERENT previous_filter_header that doesn't match what we stored - // This simulates the issue from the logs where the peer sends a different value - let mut wrong_prev_bytes = [0u8; 32]; - wrong_prev_bytes[0] = 0xef; - wrong_prev_bytes[1] = 0x07; - wrong_prev_bytes[2] = 0xce; - let wrong_prev_filter_header = FilterHeader::from_byte_array(wrong_prev_bytes); - - println!("Expected previous filter header: {:?}", last_stored_filter_header); - println!("Peer's claimed previous filter header: {:?}", wrong_prev_filter_header); - println!("These don't match - this should cause verification failure!"); - - let second_cfheaders = create_test_cfheaders_message( - second_batch_start, - second_batch_count, - wrong_prev_filter_header, // This is the wrong value! - &second_batch_block_hashes, - ); - - // Step 5: Process second batch - this should fail - println!("\nStep 5: Processing second batch (should fail)..."); - - let result = - filter_sync.handle_cfheaders_message(second_cfheaders, &mut storage, &mut network).await; - - match result { - Ok(_) => panic!("Second batch should have failed verification!"), - Err(SyncError::Validation(msg)) => { - println!("✅ Expected failure occurred: {}", msg); - assert!(msg.contains("Filter header chain verification failed")); - } - Err(e) => panic!("Wrong error type: {:?}", e), - } - - println!("\n✅ Successfully reproduced the filter header verification failure!"); - println!("The issue is that different peers (or overlapping requests) provide"); - println!("different values for previous_filter_header, breaking chain continuity."); -} - -#[ignore = "mock implementation incomplete"] -#[tokio::test] -async fn test_overlapping_batches_from_different_peers() { - let _ = env_logger::try_init(); - - println!("=== Testing Overlapping Batches from Different Peers ==="); - println!("🐛 BUG REPRODUCTION TEST - This test should FAIL to demonstrate the bug!"); - - // This test simulates the REAL production scenario that causes crashes: - // - Peer A sends heights 1000-2000 - // - Peer B sends heights 1500-2500 (overlapping!) - // Each peer provides different (but potentially valid) previous_filter_header values - // - // The system should handle this gracefully, but currently it crashes. - // This test will FAIL until we implement the fix. 
- - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - let config = ClientConfig::new(Network::Dash); - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - // Step 1: Set up headers for the full range we'll need - println!("Step 1: Setting up header chain (heights 1-3000)..."); - let initial_headers = BlockHeader::dummy_batch(1..3000); // Headers 1-2999 - storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - - let tip_height = storage.get_tip_height().await.unwrap(); - println!("Header chain stored: tip height = {}", tip_height); - assert_eq!(tip_height, 2999); - - // Step 2: Start filter sync - println!("\nStep 2: Starting filter header sync..."); - filter_sync.start_sync_headers(&mut network, &mut storage).await.expect("Failed to start sync"); - - // Step 3: Process Peer A's batch first (heights 1000-2000, 1001 headers) - println!("\nStep 3: Processing Peer A's batch (heights 1000-2000)..."); - - // We need to first process headers 1-999 to get to height 1000 - println!(" First processing initial batch (heights 1-999) to establish chain..."); - let initial_batch_start = 1; - let initial_batch_count = 999; - let initial_batch_end = initial_batch_start + initial_batch_count - 1; // 999 - - let mut initial_batch_block_hashes = Vec::new(); - for height in initial_batch_start..=initial_batch_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - initial_batch_block_hashes.push(header.block_hash()); - } - - let genesis_prev_filter_header = FilterHeader::from_byte_array([0x00u8; 32]); // Genesis - - let initial_cfheaders = create_test_cfheaders_message( - initial_batch_start, - initial_batch_count, - genesis_prev_filter_header, - &initial_batch_block_hashes, - ); - - filter_sync - .handle_cfheaders_message(initial_cfheaders, &mut storage, &mut network) - .await - .expect("Initial batch should succeed"); - - println!(" Initial batch processed. 
Now processing Peer A's batch..."); - - // Now Peer A's batch: heights 1000-2000 (1001 headers) - let peer_a_start = 1000; - let peer_a_count = 1001; - let peer_a_end = peer_a_start + peer_a_count - 1; // 2000 - - let mut peer_a_block_hashes = Vec::new(); - for height in peer_a_start..=peer_a_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - peer_a_block_hashes.push(header.block_hash()); - } - - // Peer A's previous_filter_header should be the header at height 999 - let peer_a_prev_filter_header = storage - .get_filter_header(999) - .await - .unwrap() - .expect("Should have filter header at height 999"); - - let peer_a_cfheaders = create_test_cfheaders_message( - peer_a_start, - peer_a_count, - peer_a_prev_filter_header, - &peer_a_block_hashes, - ); - - // Process Peer A's batch - let result_a = - filter_sync.handle_cfheaders_message(peer_a_cfheaders, &mut storage, &mut network).await; - - match result_a { - Ok(_) => println!(" ✅ Peer A's batch processed successfully"), - Err(e) => panic!("Peer A's batch should have succeeded: {:?}", e), - } - - // Verify Peer A's data was stored - let filter_tip_after_a = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!(" Filter tip after Peer A: {}", filter_tip_after_a); - assert_eq!(filter_tip_after_a, peer_a_end); - - // Step 4: Now process Peer B's overlapping batch (heights 1500-2500, 1001 headers) - println!("\nStep 4: Processing Peer B's OVERLAPPING batch (heights 1500-2500)..."); - println!(" This overlaps with Peer A's batch by 501 headers (1500-2000)!"); - - let peer_b_start = 1500; - let peer_b_count = 1001; - let peer_b_end = peer_b_start + peer_b_count - 1; // 2500 - - let mut peer_b_block_hashes = Vec::new(); - for height in peer_b_start..=peer_b_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - peer_b_block_hashes.push(header.block_hash()); - } - - // HERE'S THE KEY: Peer B provides a different previous_filter_header - // Peer B thinks the previous header should be at height 1499, but Peer A - // already processed through height 2000, so our stored chain is different - - // Simulate Peer B having a different view: use the header at height 1499 - // but Peer B calculated it differently (simulating different peer state) - let peer_b_prev_filter_header_stored = storage - .get_filter_header(1499) - .await - .unwrap() - .expect("Should have filter header at height 1499"); - - // Simulate Peer B having computed this header differently - create a slightly different value - let mut peer_b_prev_bytes = peer_b_prev_filter_header_stored.to_byte_array(); - peer_b_prev_bytes[0] ^= 0x01; // Flip one bit to make it different - let peer_b_prev_filter_header = FilterHeader::from_byte_array(peer_b_prev_bytes); - - println!(" Peer A's stored header at 1499: {:?}", peer_b_prev_filter_header_stored); - println!(" Peer B's claimed header at 1499: {:?}", peer_b_prev_filter_header); - println!(" These are DIFFERENT - simulating different peer views!"); - - let peer_b_cfheaders = create_test_cfheaders_message( - peer_b_start, - peer_b_count, - peer_b_prev_filter_header, // Different from what we have stored! 
- &peer_b_block_hashes, - ); - - // Step 5: Process Peer B's overlapping batch - this should expose the issue - println!("\nStep 5: Processing Peer B's batch (should fail due to inconsistent previous_filter_header)..."); - - let result_b = - filter_sync.handle_cfheaders_message(peer_b_cfheaders, &mut storage, &mut network).await; - - match result_b { - Ok(_) => { - println!(" ✅ Peer B's batch was accepted - overlap handling worked!"); - let final_tip = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!(" Final filter tip: {}", final_tip); - println!( - " 🎯 This is what we want - the system should be resilient to overlapping data!" - ); - } - Err(e) => { - println!(" ❌ Peer B's batch failed: {:?}", e); - println!(" 🐛 BUG EXPOSED: The system crashed when receiving overlapping batches from different peers!"); - println!(" This is the production issue we need to fix - the system should handle overlapping data gracefully."); - - // FAIL THE TEST to show the bug exists - panic!("🚨 BUG REPRODUCED: System cannot handle overlapping filter headers from different peers. Error: {:?}", e); - } - } - - println!("\n🎯 SUCCESS: The system correctly handled overlapping batches!"); - println!( - "The fix is working - peers with different filter header views are handled gracefully." - ); -} - -#[ignore = "mock implementation incomplete"] -#[tokio::test] -async fn test_filter_header_verification_overlapping_batches() { - let _ = env_logger::try_init(); - - println!("=== Testing Overlapping Filter Header Batches ==="); - - // This test simulates what happens when we receive overlapping filter header batches - // due to recovery/retry mechanisms or multiple peers - - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - // Set up initial headers - start from 1 for proper sync - let initial_headers = BlockHeader::dummy_batch(1..2000); - storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - - // Start filter sync first (required for message processing) - filter_sync.start_sync_headers(&mut network, &mut storage).await.expect("Failed to start sync"); - - // First batch: 1-500 (500 headers) - let batch1_start = 1; - let batch1_count = 500; - let batch1_end = batch1_start + batch1_count - 1; - - let mut batch1_block_hashes = Vec::new(); - for height in batch1_start..=batch1_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - batch1_block_hashes.push(header.block_hash()); - } - - let prev_filter_header = FilterHeader::from_byte_array([0x01u8; 32]); - - let batch1_cfheaders = create_test_cfheaders_message( - batch1_start, - batch1_count, - prev_filter_header, - &batch1_block_hashes, - ); - - // Process first batch - filter_sync - .handle_cfheaders_message(batch1_cfheaders, &mut storage, &mut network) - .await - .expect("First batch should succeed"); - - let filter_tip = storage.get_filter_tip_height().await.unwrap().unwrap(); - assert_eq!(filter_tip, batch1_end); - - // Second batch: Overlapping range 400-1000 (601 headers) - // This overlaps with the previous batch by 100 headers - let batch2_start = 400; - let batch2_count = 601; - let batch2_end = batch2_start + batch2_count - 1; - - let mut 
batch2_block_hashes = Vec::new(); - for height in batch2_start..=batch2_end { - let header = storage.get_header(height).await.unwrap().unwrap(); - batch2_block_hashes.push(header.block_hash()); - } - - // Get the correct previous filter header for this overlapping batch - let overlap_prev_height = batch2_start - 1; - let correct_prev_filter_header = storage - .get_filter_header(overlap_prev_height) - .await - .unwrap() - .expect("Previous filter header should exist"); - - let batch2_cfheaders = create_test_cfheaders_message( - batch2_start, - batch2_count, - correct_prev_filter_header, - &batch2_block_hashes, - ); - - // Process overlapping batch - this should handle overlap gracefully - let result = - filter_sync.handle_cfheaders_message(batch2_cfheaders, &mut storage, &mut network).await; - - match result { - Ok(_) => println!("✅ Overlapping batch handled successfully"), - Err(e) => println!("❌ Overlapping batch failed: {:?}", e), - } - - // The filter tip should now be at the end of the second batch - let final_filter_tip = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!("Final filter tip: {}", final_filter_tip); - assert!(final_filter_tip >= batch1_end); // Should be at least as high as before -} - -#[ignore = "mock implementation incomplete"] -#[tokio::test] -async fn test_filter_header_verification_race_condition_simulation() { - let _ = env_logger::try_init(); - - println!("=== Testing Race Condition Simulation ==="); - - // This test simulates the race condition that might occur when multiple - // filter header requests are in flight simultaneously - - let config = ClientConfig::new(Network::Dash).with_storage_path(TempDir::new().unwrap().path()); - - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create tmp storage"); - let mut network = MockNetworkManager::new(); - - let received_heights = Arc::new(Mutex::new(HashSet::new())); - let mut filter_sync: FilterSyncManager = - FilterSyncManager::new(&config, received_heights); - - // Set up headers - need enough for batch B (up to height 3000) - let initial_headers = BlockHeader::dummy_batch(1..3001); - storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - - // Simulate: Start sync, send request for batch A - filter_sync.start_sync_headers(&mut network, &mut storage).await.expect("Failed to start sync"); - - // Simulate: Timeout occurs, recovery sends request for overlapping batch B - // Both requests come back, but in wrong order or with inconsistent data - - let base_start = 1; - - // Batch A: 1-1000 (original request) - let batch_a_count = 1000; - let mut batch_a_block_hashes = Vec::new(); - for height in base_start..(base_start + batch_a_count) { - let header = storage.get_header(height).await.unwrap().unwrap(); - batch_a_block_hashes.push(header.block_hash()); - } - - // Batch B: 1-2000 (recovery request, larger range) - let batch_b_count = 2000; - let mut batch_b_block_hashes = Vec::new(); - for height in base_start..(base_start + batch_b_count) { - let header = storage.get_header(height).await.unwrap().unwrap(); - batch_b_block_hashes.push(header.block_hash()); - } - - let prev_filter_header = FilterHeader::from_byte_array([0x02u8; 32]); - - // Create both batches with the same previous filter header - let batch_a = create_test_cfheaders_message( - base_start, - batch_a_count, - prev_filter_header, - &batch_a_block_hashes, - ); - - let batch_b = create_test_cfheaders_message( - base_start, - batch_b_count, - prev_filter_header, - 
&batch_b_block_hashes, - ); - - // Process batch A first - println!("Processing batch A (1000 headers)..."); - filter_sync - .handle_cfheaders_message(batch_a, &mut storage, &mut network) - .await - .expect("Batch A should succeed"); - - let tip_after_a = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!("Filter tip after batch A: {}", tip_after_a); - - // Now process batch B (overlapping) - println!("Processing batch B (2000 headers, overlapping)..."); - let result = filter_sync.handle_cfheaders_message(batch_b, &mut storage, &mut network).await; - - match result { - Ok(_) => { - let tip_after_b = storage.get_filter_tip_height().await.unwrap().unwrap(); - println!("✅ Batch B processed successfully, tip: {}", tip_after_b); - } - Err(e) => { - println!("❌ Batch B failed: {:?}", e); - } - } -} diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs deleted file mode 100644 index dbe3c7c56..000000000 --- a/dash-spv/tests/header_sync_test.rs +++ /dev/null @@ -1,130 +0,0 @@ -//! Integration tests for header synchronization functionality. - -use dash_spv::{ - client::{ClientConfig, DashSpvClient}, - network::PeerNetworkManager, - storage::{BlockHeaderStorage, ChainStateStorage, DiskStorageManager}, - sync::legacy::{HeaderSyncManager, ReorgConfig}, - types::{ChainState, ValidationMode}, -}; -use dashcore::{block::Header as BlockHeader, block::Version, Network}; -use dashcore_hashes::Hash; -use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; -use key_wallet_manager::wallet_manager::WalletManager; -use log::info; -use std::sync::Arc; -use std::time::Duration; -use tempfile::TempDir; -use test_case::test_case; -use tokio::sync::RwLock; -use tokio::time::timeout; - -#[tokio::test] -async fn test_header_sync_with_client_integration() { - let _ = env_logger::try_init(); - let temp_dir = tempfile::TempDir::new().expect("Failed to create temporary directory"); - - // Test header sync integration with the full client - let config = ClientConfig::new(Network::Dash) - .with_storage_path(temp_dir.path().to_path_buf()) - .with_validation_mode(ValidationMode::Basic); - - // Create network manager - let network_manager = - PeerNetworkManager::new(&config).await.expect("Failed to create network manager"); - - // Create storage manager - let storage_manager = DiskStorageManager::new(&config).await.expect("Failed to create storage"); - - // Create wallet manager - let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); - - let client = DashSpvClient::new(config, network_manager, storage_manager, wallet).await; - assert!(client.is_ok(), "Client creation should succeed"); - - let mut client = client.unwrap(); - - // Verify client starts with empty state - client.start().await.unwrap(); - - // Poll until the headers progress becomes available (async managers may not be ready immediately) - let result = timeout(Duration::from_secs(5), async { - loop { - let progress = client.sync_progress(); - if let Ok(headers) = progress.headers() { - return headers.current_height(); - } - tokio::time::sleep(Duration::from_millis(50)).await; - } - }) - .await - .expect("Timed out waiting for headers progress to become available"); - - assert_eq!(result, 0); - - info!("Header sync client integration test completed"); -} - -// Helper functions for creating test data - -fn create_test_header_chain(count: usize) -> Vec { - create_test_header_chain_from(0, count) -} - -fn create_test_header_chain_from(start: usize, count: usize) -> Vec { - let mut headers = 
Vec::new(); - - for i in start..(start + count) { - let header = BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: if i == 0 { - dashcore::BlockHash::all_zeros() - } else { - // Create a deterministic previous hash based on height - dashcore::BlockHash::from_byte_array([(i - 1) as u8; 32]) - }, - merkle_root: dashcore::TxMerkleNode::from_byte_array([(i + 1) as u8; 32]), - time: 1234567890 + i as u32, // Sequential timestamps - bits: dashcore::CompactTarget::from_consensus(0x1d00ffff), // Standard difficulty - nonce: i as u32, // Sequential nonces - }; - headers.push(header); - } - - headers -} - -#[test_case(0, 1 ; "genesis_1_block")] -#[test_case(0, 70000 ; "genesis_70000_blocks")] -#[test_case(5000, 1 ; "checkpoint_1_block")] -#[test_case(1000, 70000 ; "checkpoint_70000_blocks")] -#[tokio::test] -async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { - let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let config = ClientConfig::regtest().with_storage_path(temp_dir.path()); - let mut storage = DiskStorageManager::new(&config).await.expect("Failed to create storage"); - - let headers = create_test_header_chain(header_count); - let expected_tip_hash = headers.last().unwrap().block_hash(); - - // Create and store chain state - let mut chain_state = ChainState::new_for_network(Network::Dash); - chain_state.sync_base_height = sync_base_height; - storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); - storage.store_headers(&headers).await.expect("Failed to store headers"); - - // Create HeaderSyncManager and load from storage - let config = ClientConfig::new(Network::Dash); - let chain_state_arc = Arc::new(RwLock::new(ChainState::new_for_network(Network::Dash))); - let mut header_sync = HeaderSyncManager::::new( - &config, - ReorgConfig::default(), - chain_state_arc.clone(), - ) - .expect("Failed to create HeaderSyncManager"); - - // Call prepare_sync and verify it returns the correct hash - let result = header_sync.prepare_sync(&mut storage).await; - let returned_hash = result.unwrap().unwrap(); - assert_eq!(returned_hash, expected_tip_hash, "prepare_sync should return the correct tip hash"); -} diff --git a/dash-spv/tests/smart_fetch_integration_test.rs b/dash-spv/tests/smart_fetch_integration_test.rs deleted file mode 100644 index 60d96853c..000000000 --- a/dash-spv/tests/smart_fetch_integration_test.rs +++ /dev/null @@ -1,235 +0,0 @@ -use dash_spv::client::ClientConfig; -use dashcore::network::message_sml::MnListDiff; -use dashcore::sml::llmq_type::network::NetworkLLMQExt; -use dashcore::sml::llmq_type::{DKGWindow, LLMQType}; -use dashcore::transaction::special_transaction::quorum_commitment::QuorumEntry; -use dashcore::{BlockHash, Network, Transaction}; -use dashcore_hashes::Hash; - -#[tokio::test] -async fn test_smart_fetch_basic_dkg_windows() { - let network = Network::Testnet; - - // Create test data for DKG windows - let windows = network.get_all_dkg_windows(1000, 1100); - - // Should have windows for different quorum types - assert!(!windows.is_empty()); - - // Each window should be within our range - for window_list in windows.values() { - for window in window_list { - // Mining window should overlap with our range - assert!(window.mining_end >= 1000 || window.mining_start <= 1100); - } - } -} - -#[tokio::test] -async fn test_smart_fetch_state_initialization() { - // Create a simple config for testing - let config = ClientConfig::new(Network::Testnet); - - // Test that we can create the sync 
manager - // Note: We can't access private fields, but we can verify the structure exists - // Need to specify generic types for MasternodeSyncManager - use dash_spv::network::PeerNetworkManager; - use dash_spv::storage::DiskStorageManager; - let _sync_manager = dash_spv::sync::legacy::masternodes::MasternodeSyncManager::< - DiskStorageManager, - PeerNetworkManager, - >::new(&config); - - // The state should be initialized when requesting diffs - // Note: We can't test the full flow without a network connection, - // but we've verified the structure compiles correctly -} - -#[tokio::test] -async fn test_window_action_transitions() { - // Test the window struct construction - let window = DKGWindow { - cycle_start: 1000, - mining_start: 1010, - mining_end: 1018, - llmq_type: LLMQType::Llmqtype50_60, - }; - - // Verify window properties - assert_eq!(window.cycle_start, 1000); - assert_eq!(window.mining_start, 1010); - assert_eq!(window.mining_end, 1018); - assert_eq!(window.llmq_type, LLMQType::Llmqtype50_60); -} - -#[tokio::test] -async fn test_dkg_fetch_state_management() { - let network = Network::Testnet; - let windows = network.get_all_dkg_windows(1000, 1200); - - // Verify we get windows for the network - assert!(!windows.is_empty(), "Should have DKG windows in range"); - - // Check that windows are properly organized by height - for (height, window_list) in &windows { - assert!(*height >= 1000 || window_list.iter().any(|w| w.mining_end >= 1000)); - assert!(*height <= 1200 || window_list.iter().any(|w| w.mining_start <= 1200)); - } -} - -#[tokio::test] -async fn test_smart_fetch_quorum_discovery() { - // Simulate a masternode diff with quorums - let diff = MnListDiff { - version: 1, - base_block_hash: BlockHash::all_zeros(), - block_hash: BlockHash::all_zeros(), - total_transactions: 0, - merkle_hashes: vec![], - merkle_flags: vec![], - coinbase_tx: Transaction { - version: 1, - lock_time: 0, - input: vec![], - output: vec![], - special_transaction_payload: None, - }, - deleted_masternodes: vec![], - new_masternodes: vec![], - deleted_quorums: vec![], - new_quorums: vec![{ - let llmq_type = LLMQType::Llmqtype50_60; - let quorum_size = llmq_type.size() as usize; - QuorumEntry { - version: 1, - llmq_type, - quorum_hash: dashcore::QuorumHash::all_zeros(), - quorum_index: None, - signers: vec![true; quorum_size], - valid_members: vec![true; quorum_size], - quorum_public_key: dashcore::bls_sig_utils::BLSPublicKey::from([0; 48]), - quorum_vvec_hash: dashcore::hash_types::QuorumVVecHash::all_zeros(), - threshold_sig: dashcore::bls_sig_utils::BLSSignature::from([0; 96]), - all_commitment_aggregated_signature: dashcore::bls_sig_utils::BLSSignature::from( - [0; 96], - ), - } - }], - quorums_chainlock_signatures: vec![], - }; - - // Verify quorum was found - assert_eq!(diff.new_quorums.len(), 1); - assert_eq!(diff.new_quorums[0].llmq_type, LLMQType::Llmqtype50_60); -} - -#[tokio::test] -async fn test_smart_fetch_efficiency_metrics() { - let network = Network::Testnet; - - // Calculate expected efficiency for a large range - let start = 0; - let end = 30000; - - // Without smart fetch: would request all 30,000 blocks - let blocks_without_smart_fetch = end - start; - - // With smart fetch: only request blocks in DKG windows - let windows = network.get_all_dkg_windows(start, end); - let mut blocks_with_smart_fetch = 0; - - for window_list in windows.values() { - for window in window_list { - // Count blocks in each mining window - let window_start = window.mining_start.max(start); - let window_end = 
window.mining_end.min(end); - if window_end >= window_start { - blocks_with_smart_fetch += (window_end - window_start + 1) as usize; - } - } - } - - // Calculate efficiency - let efficiency = 1.0 - (blocks_with_smart_fetch as f64 / blocks_without_smart_fetch as f64); - - println!("Smart fetch efficiency: {:.2}%", efficiency * 100.0); - println!("Blocks without smart fetch: {}", blocks_without_smart_fetch); - println!("Blocks with smart fetch: {}", blocks_with_smart_fetch); - println!("Blocks saved: {}", blocks_without_smart_fetch as usize - blocks_with_smart_fetch); - - // Should achieve significant reduction - // Note: Testnet may have different efficiency due to different LLMQ configurations - assert!( - efficiency > 0.50, - "Smart fetch should reduce requests by at least 50% (got {:.2}%)", - efficiency * 100.0 - ); -} - -#[tokio::test] -async fn test_smart_fetch_edge_cases() { - let network = Network::Testnet; - - // Test edge case: range smaller than one DKG interval - let windows = network.get_all_dkg_windows(100, 110); - - // Should still find relevant windows - let total_windows: usize = windows.values().map(|v| v.len()).sum(); - assert!(total_windows > 0, "Should find windows even for small ranges"); - - // Test edge case: range starting at DKG boundary - let windows = network.get_all_dkg_windows(120, 144); - for window_list in windows.values() { - for window in window_list { - // Verify window properties - assert!(window.cycle_start <= 144); - assert!(window.mining_end >= 120 || window.mining_start <= 144); - } - } -} - -#[tokio::test] -async fn test_smart_fetch_rotating_quorums() { - let _network = Network::Testnet; - - // Test with rotating quorum type (60_75) - let llmq = LLMQType::Llmqtype60_75; - let windows = llmq.get_dkg_windows_in_range(1000, 2000); - - // Verify rotating quorum window calculation - for window in &windows { - assert_eq!(window.llmq_type, llmq); - - // For rotating quorums, mining window start is different - let params = llmq.params(); - let expected_mining_start = window.cycle_start - + params.signing_active_quorum_count - + params.dkg_params.phase_blocks * 5; - assert_eq!(window.mining_start, expected_mining_start); - } -} - -#[tokio::test] -async fn test_smart_fetch_platform_activation() { - let network = Network::Dash; - - // Test before platform activation - let windows_before = network.get_all_dkg_windows(1_000_000, 1_000_100); - - // Should not include platform quorum (100_67) before activation - let has_platform_before = windows_before - .values() - .flat_map(|v| v.iter()) - .any(|w| w.llmq_type == LLMQType::Llmqtype100_67); - assert!(!has_platform_before, "Platform quorum should not be active before height 1,888,888"); - - // Test after platform activation - let windows_after = network.get_all_dkg_windows(1_888_900, 1_889_000); - - // Should include platform quorum after activation - let has_platform_after = windows_after - .values() - .flat_map(|v| v.iter()) - .any(|w| w.llmq_type == LLMQType::Llmqtype100_67); - assert!(has_platform_after, "Platform quorum should be active after height 1,888,888"); -} diff --git a/docs/implementation-notes/REORG_INTEGRATION_STATUS.md b/docs/implementation-notes/REORG_INTEGRATION_STATUS.md deleted file mode 100644 index 0ce3b6c81..000000000 --- a/docs/implementation-notes/REORG_INTEGRATION_STATUS.md +++ /dev/null @@ -1,65 +0,0 @@ -# Reorg and Checkpoint Integration Status - -## ✅ Successfully Integrated - -### 1. 
-### 1. HeaderSyncManagerWithReorg Fully Integrated
-- Replaced the basic `HeaderSyncManager` with `HeaderSyncManagerWithReorg` throughout the codebase
-- Updated both `SyncManager` and `SequentialSyncManager` to use the new implementation
-- All existing APIs maintained for backward compatibility
-
-### 2. Key Integration Points
-- **SyncManager**: Now uses `HeaderSyncManagerWithReorg` with the default `ReorgConfig`
-- **SequentialSyncManager**: Updated to use reorg-aware header sync
-- **SyncAdapter**: Updated type signatures to expose `HeaderSyncManagerWithReorg`
-- **MessageHandler**: Works seamlessly with the new implementation
-
-### 3. New Features Active
-- **Fork Detection**: Automatically detects competing chains during sync
-- **Reorg Handling**: Can perform chain reorganizations when a stronger fork is found
-- **Checkpoint Validation**: Blocks at checkpoint heights are validated against known hashes
-- **Checkpoint-based Sync**: Can start sync from the last checkpoint for faster initial sync
-- **Deep Reorg Protection**: Prevents reorganizations past checkpoint heights
-
-### 4. Configuration
-Default `ReorgConfig` settings:
-```rust
-ReorgConfig {
-    max_reorg_depth: 1000,      // Maximum 1000-block reorg
-    respect_chain_locks: true,  // Honor chain locks (when implemented)
-    max_forks: 10,              // Track up to 10 competing forks
-    enforce_checkpoints: true,  // Enforce checkpoint validation
-}
-```
-
-### 5. Test Results
-- ✅ All 49 library tests passing
-- ✅ Reorg tests (8/8) passing
-- ✅ Checkpoint unit tests (3/3) passing
-- ✅ Compilation successful with full integration
-
-## What This Means
-
-### Security Improvements
-1. **Protection Against Deep Reorgs**: The library now rejects attempts to reorganize the chain past checkpoints
-2. **Fork Awareness**: Multiple competing chains are tracked and evaluated
-3. **Best Chain Selection**: Automatically switches to the chain with the most work
-
-### Performance Improvements
-1. **Checkpoint-based Fast Sync**: Can start from recent checkpoints instead of genesis
-2. **Optimized Fork Handling**: Efficient tracking of multiple chain tips
-
-### Compatibility
-- All existing code continues to work without modification
-- The integration is transparent to users of the library
-- Additional methods are available for advanced use cases
-
-## Next Steps
-
-While reorg handling and checkpoints are now fully integrated, several critical features remain:
-
-1. **Chain Lock Validation** - Still needed for InstantSend security
-2. **Persistent State** - Sync progress is lost on restart
-3. **Peer Reputation** - No protection against malicious peers
-4. **UTXO Rollback** - Wallet state is not updated during reorgs
-
-The library is now significantly more secure against reorganization attacks, but still requires the remaining features for production use.
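-
-As a closing illustration, the checkpoint rule described in section 3 reduces to a small predicate. The sketch below is illustrative only (`reorg_allowed` and its parameters are not the library's API); it shows how `enforce_checkpoints` and `max_reorg_depth` combine to gate a reorganization:
-
-```rust
-/// Illustrative only: whether a proposed reorg should be accepted.
-/// `fork_height` is the fork point; `tip_height` is the current chain tip.
-fn reorg_allowed(cfg: &ReorgConfig, fork_height: u32, tip_height: u32, last_checkpoint: u32) -> bool {
-    let depth = tip_height.saturating_sub(fork_height);
-    // Never reorganize past an enforced checkpoint,
-    // and never exceed the configured maximum depth.
-    !(cfg.enforce_checkpoints && fork_height < last_checkpoint) && depth <= cfg.max_reorg_depth
-}
-```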
diff --git a/docs/implementation-notes/SEQUENTIAL_SYNC_DESIGN.md b/docs/implementation-notes/SEQUENTIAL_SYNC_DESIGN.md
deleted file mode 100644
index deb39fb35..000000000
--- a/docs/implementation-notes/SEQUENTIAL_SYNC_DESIGN.md
+++ /dev/null
@@ -1,440 +0,0 @@
-# Sequential Sync Design Document
-
-## Overview
-
-This document outlines the design for transforming dash-spv from an interleaved sync approach to a strict sequential sync pipeline.
-
-## State Machine Design
-
-### Core State Enum
-
-```rust
-#[derive(Debug, Clone, PartialEq)]
-pub enum SyncPhase {
-    /// Not syncing, waiting to start
-    Idle,
-
-    /// Phase 1: Downloading headers
-    DownloadingHeaders {
-        start_time: Instant,
-        start_height: u32,
-        current_height: u32,
-        target_height: Option<u32>,
-        last_progress: Instant,
-        headers_per_second: f64,
-    },
-
-    /// Phase 2: Downloading masternode lists
-    DownloadingMnList {
-        start_time: Instant,
-        start_height: u32,
-        current_height: u32,
-        target_height: u32,
-        last_progress: Instant,
-    },
-
-    /// Phase 3: Downloading compact filter headers
-    DownloadingCFHeaders {
-        start_time: Instant,
-        start_height: u32,
-        current_height: u32,
-        target_height: u32,
-        last_progress: Instant,
-        cfheaders_per_second: f64,
-    },
-
-    /// Phase 4: Downloading compact filters
-    DownloadingFilters {
-        start_time: Instant,
-        requested_ranges: HashMap<(u32, u32), Instant>,
-        completed_heights: HashSet<u32>,
-        total_filters: u32,
-        last_progress: Instant,
-    },
-
-    /// Phase 5: Downloading full blocks
-    DownloadingBlocks {
-        start_time: Instant,
-        pending_blocks: VecDeque<(BlockHash, u32)>,
-        downloading: HashMap<BlockHash, Instant>,
-        completed: Vec<BlockHash>,
-        last_progress: Instant,
-    },
-
-    /// Fully synchronized
-    FullySynced {
-        sync_completed_at: Instant,
-        total_sync_time: Duration,
-    },
-}
-```
-
-### Phase Manager
-
-```rust
-pub struct SequentialSyncManager {
-    /// Current sync phase
-    current_phase: SyncPhase,
-
-    /// Phase-specific managers (existing, but controlled)
-    header_sync: HeaderSyncManager,
-    filter_sync: FilterSyncManager,
-    masternode_sync: MasternodeSyncManager,
-
-    /// Configuration
-    config: ClientConfig,
-
-    /// Phase transition history
-    phase_history: Vec<PhaseTransition>,
-
-    /// Phase-specific request queue
-    pending_requests: VecDeque<NetworkRequest>,
-
-    /// Active request tracking
-    active_requests: HashMap<RequestType, Instant>,
-}
-
-#[derive(Debug)]
-struct PhaseTransition {
-    from_phase: SyncPhase,
-    to_phase: SyncPhase,
-    timestamp: Instant,
-    reason: String,
-}
-```
-
-## Phase Lifecycle
-
-### 1. Phase Entry
-Each phase has strict entry conditions:
-
-```rust
-impl SequentialSyncManager {
-    fn can_enter_phase(&self, phase: &SyncPhase) -> Result<bool> {
-        match phase {
-            SyncPhase::DownloadingHeaders { .. } => Ok(true), // Always can start
-
-            SyncPhase::DownloadingMnList { .. } => {
-                // Headers must be 100% complete
-                self.are_headers_complete()
-            }
-
-            SyncPhase::DownloadingCFHeaders { .. } => {
-                // Headers complete AND MnList complete (or disabled)
-                Ok(self.are_headers_complete()? &&
-                   (self.are_masternodes_complete()? || !self.config.enable_masternodes))
-            }
-
-            SyncPhase::DownloadingFilters { .. } => {
-                // CFHeaders must be 100% complete
-                self.are_cfheaders_complete()
-            }
-
-            SyncPhase::DownloadingBlocks { .. } => {
-                // Filters complete (or no blocks needed)
-                Ok(self.are_filters_complete()? || self.no_blocks_needed())
-            }
-
-            _ => Ok(false),
-        }
-    }
-}
-```
-
-### 2. Phase Execution
-Each phase follows a standard pattern:
-
-```rust
-async fn execute_current_phase(&mut self, network: &mut dyn NetworkManager, storage: &mut dyn StorageManager) -> Result<PhaseAction> {
-    match &self.current_phase {
-        SyncPhase::DownloadingHeaders { .. } => {
-            self.execute_headers_phase(network, storage).await
-        }
-        SyncPhase::DownloadingMnList { .. } => {
-            self.execute_mnlist_phase(network, storage).await
-        }
-        // ... etc
-    }
-}
-
-enum PhaseAction {
-    Continue,                // Keep working on current phase
-    TransitionTo(SyncPhase), // Move to next phase
-    Error(SyncError),        // Handle error
-    Complete,                // Sync fully complete
-}
-```
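-
-A minimal sketch of the loop that could drive this pattern (illustrative only, not part of the design's API; `transition_to_next_phase` and `handle_phase_error` are defined in the sections below):
-
-```rust
-// Illustrative driver: run the current phase until the manager reports
-// a transition, an error, or full completion.
-loop {
-    match self.execute_current_phase(network, storage).await? {
-        PhaseAction::Continue => continue,
-        PhaseAction::TransitionTo(_) => self.transition_to_next_phase(storage).await?,
-        PhaseAction::Error(e) => self.handle_phase_error(e, network, storage).await?,
-        PhaseAction::Complete => break,
-    }
-}
-```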
-
-### 3. Phase Completion
-Strict completion criteria for each phase:
-
-```rust
-impl SequentialSyncManager {
-    async fn is_phase_complete(&self, storage: &dyn StorageManager) -> Result<bool> {
-        match &self.current_phase {
-            SyncPhase::DownloadingHeaders { current_height, .. } => {
-                // Headers complete when we receive an empty headers response
-                // AND we've verified chain continuity
-                let tip = storage.get_tip_height().await?;
-                let peer_height = self.get_peer_reported_height().await?;
-                Ok(tip == Some(peer_height) && self.last_headers_response_was_empty())
-            }
-
-            SyncPhase::DownloadingCFHeaders { current_height, target_height, .. } => {
-                // Complete when current matches target exactly
-                Ok(current_height >= target_height)
-            }
-
-            // ... etc
-        }
-    }
-}
-```
-
-### 4. Phase Transition
-Clean handoff between phases:
-
-```rust
-async fn transition_to_next_phase(&mut self, storage: &mut dyn StorageManager) -> Result<()> {
-    let next_phase = match &self.current_phase {
-        SyncPhase::Idle => SyncPhase::DownloadingHeaders { /* ... */ },
-
-        SyncPhase::DownloadingHeaders { .. } => {
-            if self.config.enable_masternodes {
-                SyncPhase::DownloadingMnList { /* ... */ }
-            } else if self.config.enable_filters {
-                SyncPhase::DownloadingCFHeaders { /* ... */ }
-            } else {
-                SyncPhase::FullySynced { /* ... */ }
-            }
-        }
-
-        // ... etc
-    };
-
-    // Log transition
-    info!("📊 Phase transition: {:?} -> {:?}", self.current_phase, next_phase);
-
-    // Record history
-    self.phase_history.push(PhaseTransition {
-        from_phase: self.current_phase.clone(),
-        to_phase: next_phase.clone(),
-        timestamp: Instant::now(),
-        reason: "Phase completed successfully".to_string(),
-    });
-
-    // Clean up current phase
-    self.cleanup_current_phase().await?;
-
-    // Initialize next phase
-    self.current_phase = next_phase;
-    self.initialize_current_phase().await?;
-
-    Ok(())
-}
-```
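-
-The `cleanup_current_phase` / `initialize_current_phase` hooks referenced above are not specified in this document; one plausible shape (illustrative only) is:
-
-```rust
-impl SequentialSyncManager {
-    /// Illustrative: drop state that must not leak across phases.
-    async fn cleanup_current_phase(&mut self) -> Result<()> {
-        // Requests queued during the old phase are no longer valid.
-        self.pending_requests.clear();
-        self.active_requests.clear();
-        Ok(())
-    }
-
-    /// Illustrative: kick off the first request(s) of the new phase,
-    /// e.g. the initial GetHeaders or GetCFHeaders request.
-    async fn initialize_current_phase(&mut self) -> Result<()> {
-        Ok(())
-    }
-}
-```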
-
-## Request Management
-
-### Request Control Flow
-
-```rust
-impl SequentialSyncManager {
-    /// All requests must go through this method
-    pub async fn request(&mut self, request_type: RequestType, network: &mut dyn NetworkManager) -> Result<()> {
-        // Phase validation
-        if !self.is_request_allowed_in_phase(&request_type) {
-            debug!("Rejecting {:?} request in phase {:?}", request_type, self.current_phase);
-            return Err(SyncError::InvalidPhase);
-        }
-
-        // Rate limiting
-        if !self.can_send_request(&request_type) {
-            self.pending_requests.push_back(NetworkRequest {
-                request_type,
-                queued_at: Instant::now(),
-            });
-            return Ok(());
-        }
-
-        // Send request
-        self.send_request(request_type, network).await
-    }
-
-    fn is_request_allowed_in_phase(&self, request_type: &RequestType) -> bool {
-        match (&self.current_phase, request_type) {
-            (SyncPhase::DownloadingHeaders { .. }, RequestType::GetHeaders(_)) => true,
-            (SyncPhase::DownloadingMnList { .. }, RequestType::GetMnListDiff(_)) => true,
-            (SyncPhase::DownloadingCFHeaders { .. }, RequestType::GetCFHeaders(_)) => true,
-            (SyncPhase::DownloadingFilters { .. }, RequestType::GetCFilters(_)) => true,
-            (SyncPhase::DownloadingBlocks { .. }, RequestType::GetBlock(_)) => true,
-            _ => false,
-        }
-    }
-}
-```
-
-### Message Filtering
-
-```rust
-impl SequentialSyncManager {
-    /// Filter incoming messages based on current phase
-    pub async fn handle_message(&mut self, msg: NetworkMessage, network: &mut dyn NetworkManager, storage: &mut dyn StorageManager) -> Result<()> {
-        // Check if message is expected in current phase
-        if !self.is_message_expected(&msg) {
-            debug!("Ignoring unexpected {:?} message in phase {:?}", msg, self.current_phase);
-            return Ok(());
-        }
-
-        // Route to appropriate handler
-        match (&mut self.current_phase, msg) {
-            (SyncPhase::DownloadingHeaders { .. }, NetworkMessage::Headers(headers)) => {
-                self.handle_headers_in_phase(headers, network, storage).await
-            }
-            (SyncPhase::DownloadingCFHeaders { .. }, NetworkMessage::CFHeaders(cfheaders)) => {
-                self.handle_cfheaders_in_phase(cfheaders, network, storage).await
-            }
-            // ... etc
-            _ => Ok(()), // Ignore messages for other phases
-        }
-    }
-}
-```
-
-## Progress Tracking
-
-### Per-Phase Progress
-
-```rust
-impl SyncPhase {
-    pub fn progress(&self) -> PhaseProgress {
-        match self {
-            SyncPhase::DownloadingHeaders { start_height, current_height, target_height, .. } => {
-                PhaseProgress {
-                    phase_name: "Headers",
-                    items_completed: current_height - start_height,
-                    items_total: target_height.map(|t| t - start_height),
-                    percentage: calculate_percentage(*start_height, *current_height, *target_height),
-                    rate: self.calculate_rate(),
-                    eta: self.calculate_eta(),
-                }
-            }
-            // ... etc
-        }
-    }
-}
-```
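-
-`calculate_percentage` is referenced but not defined in this document; a sketch of one reasonable definition (illustrative, covering the case where the target height is still unknown):
-
-```rust
-/// Illustrative: percentage of the phase completed, or 0.0 while the
-/// target height is still unknown.
-fn calculate_percentage(start: u32, current: u32, target: Option<u32>) -> f64 {
-    match target {
-        Some(t) if t > start => {
-            let done = current.saturating_sub(start) as f64;
-            (done / (t - start) as f64 * 100.0).min(100.0)
-        }
-        _ => 0.0,
-    }
-}
-```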
-
-### Overall Progress
-
-```rust
-pub struct OverallSyncProgress {
-    pub current_phase: String,
-    pub phase_progress: PhaseProgress,
-    pub phases_completed: Vec<String>,
-    pub phases_remaining: Vec<String>,
-    pub total_elapsed: Duration,
-    pub estimated_total_time: Option<Duration>,
-}
-```
-
-## Error Recovery
-
-### Phase-Specific Recovery
-
-```rust
-impl SequentialSyncManager {
-    async fn handle_phase_error(&mut self, error: SyncError, network: &mut dyn NetworkManager, storage: &mut dyn StorageManager) -> Result<()> {
-        match &self.current_phase {
-            SyncPhase::DownloadingHeaders { .. } => {
-                // Retry from last known good header
-                let last_good = storage.get_tip_height().await?.unwrap_or(0);
-                self.restart_headers_from(last_good).await
-            }
-
-            SyncPhase::DownloadingCFHeaders { current_height, .. } => {
-                // Retry from current_height (already validated)
-                self.restart_cfheaders_from(*current_height).await
-            }
-
-            // ... etc
-        }
-    }
-}
-```
-
-## Implementation Strategy
-
-### Step 1: Create New Module Structure
-```
-src/sync/
-├── mod.rs              # Keep existing
-├── sequential/
-│   ├── mod.rs          # New SequentialSyncManager
-│   ├── phases.rs       # Phase definitions and state machine
-│   ├── transitions.rs  # Phase transition logic
-│   ├── progress.rs     # Progress tracking
-│   └── recovery.rs     # Error recovery
-```
-
-### Step 2: Refactor Existing Managers
-- Keep existing sync managers but make them phase-aware
-- Add phase validation to their request methods
-- Remove automatic interleaving behavior
-
-### Step 3: Integration Points
-- Modify `client/mod.rs` to use SequentialSyncManager
-- Update `client/message_handler.rs` to route through the sequential manager
-- Add phase information to monitoring and logging
-
-### Step 4: Migration Path
-1. Add a feature flag for sequential sync (sketched below)
-2. Run both implementations in parallel for testing
-3. Gradually migrate to sequential as the default
-4. Remove the old interleaved code
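-
-A minimal sketch of step 1, assuming a Cargo feature named `sequential-sync` (the feature name and re-export paths are hypothetical):
-
-```rust
-// Hypothetical feature gate: callers get one sync manager type either way.
-#[cfg(feature = "sequential-sync")]
-pub use crate::sync::sequential::SequentialSyncManager as DefaultSyncManager;
-#[cfg(not(feature = "sequential-sync"))]
-pub use crate::sync::SyncManager as DefaultSyncManager;
-```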
-
-## Testing Strategy
-
-### Unit Tests
-- Test each phase in isolation
-- Test phase transitions
-- Test error recovery
-- Test progress calculation
-
-### Integration Tests
-- Full sync from genesis with phase verification
-- Interruption and resume testing
-- Network failure recovery
-- Performance benchmarks
-
-### Phase Boundary Tests
-```rust
-#[tokio::test]
-async fn test_headers_must_complete_before_cfheaders() {
-    // Setup
-    let mut sync = create_test_sync_manager();
-
-    // Start headers sync
-    sync.start_sync().await.unwrap();
-    assert!(matches!(sync.current_phase(), SyncPhase::DownloadingHeaders { .. }));
-
-    // Try to request cfheaders - should fail
-    let result = sync.request(RequestType::GetCFHeaders(..), network).await;
-    assert!(matches!(result, Err(SyncError::InvalidPhase)));
-
-    // Complete headers
-    complete_headers_phase(&mut sync).await;
-
-    // Now cfheaders should be allowed
-    let result = sync.request(RequestType::GetCFHeaders(..), network).await;
-    assert!(result.is_ok());
-}
-```
-
-## Benefits
-
-1. **Clarity**: Single active phase, clear state machine
-2. **Reliability**: No race conditions or dependency issues
-3. **Debuggability**: Phase transitions clearly logged
-4. **Performance**: Better request batching within phases
-5. **Maintainability**: Easier to reason about and extend
diff --git a/docs/implementation-notes/SEQUENTIAL_SYNC_SUMMARY.md b/docs/implementation-notes/SEQUENTIAL_SYNC_SUMMARY.md
deleted file mode 100644
index fb76634c4..000000000
--- a/docs/implementation-notes/SEQUENTIAL_SYNC_SUMMARY.md
+++ /dev/null
@@ -1,180 +0,0 @@
-# Sequential Sync Implementation Summary
-
-## Overview
-
-I have successfully implemented a sequential synchronization manager for dash-spv that enforces strict phase ordering, preventing the race conditions and complexity issues caused by interleaved downloads.
-
-## What Was Implemented
-
-### 1. Core Architecture (`src/sync/sequential/`)
-
-#### Phase State Machine (`phases.rs`)
-- **SyncPhase enum**: Defines all synchronization phases with detailed state tracking
-  - Idle
-  - DownloadingHeaders
-  - DownloadingMnList
-  - DownloadingCFHeaders
-  - DownloadingFilters
-  - DownloadingBlocks
-  - FullySynced
-
-- Each phase tracks:
-  - Start time and last progress time
-  - Current progress metrics (items completed, rates)
-  - Phase-specific state (e.g., received_empty_response for headers)
-
-#### Sequential Sync Manager (`mod.rs`)
-- **SequentialSyncManager**: Main coordinator that ensures phases complete sequentially
-- Wraps existing sync managers (HeaderSyncManager, FilterSyncManager, MasternodeSyncManager)
-- Key features:
-  - Phase-aware message routing
-  - Automatic phase transitions on completion
-  - Timeout detection and recovery
-  - Progress tracking across all phases
-
-#### Phase Transitions (`transitions.rs`)
-- **TransitionManager**: Validates and manages phase transitions
-- Enforces strict dependencies:
-  - Headers must complete before MnList/CFHeaders
-  - MnList must complete before CFHeaders (if enabled)
-  - CFHeaders must complete before Filters
-  - Filters must complete before Blocks
-- Creates detailed transition history for debugging
-
-#### Request Control (`request_control.rs`)
-- **RequestController**: Phase-aware request management
-- Features:
-  - Validates requests match the current phase
-  - Rate limiting per phase
-  - Request queuing and batching
-  - Concurrent request limits
-- Prevents out-of-phase requests from being sent
-
-#### Progress Tracking (`progress.rs`)
-- **ProgressTracker**: Comprehensive progress monitoring
-- Tracks:
-  - Per-phase progress (items, percentage, rate, ETA)
-  - Overall sync progress across all phases
-  - Phase completion history
-  - Time estimates
-
-#### Error Recovery (`recovery.rs`)
-- **RecoveryManager**: Smart error recovery strategies
-- Recovery strategies:
-  - Retry with exponential backoff (see the sketch below)
-  - Restart phase from checkpoint
-  - Switch to a different peer
-  - Wait for network connectivity
-- Phase-specific recovery logic
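-
-The retry strategy can be illustrated with a small sketch (an assumed shape, not the actual `RecoveryManager` API): each retry doubles the delay from a 2-second base, capped at 60 seconds.
-
-```rust
-use std::time::Duration;
-
-/// Illustrative exponential backoff: 2s, 4s, 8s, ... capped at 60s.
-fn backoff_delay(attempt: u32) -> Duration {
-    let secs = 2u64.saturating_mul(1u64 << attempt.min(5)); // 2 * 2^attempt
-    Duration::from_secs(secs.min(60))
-}
-```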
-
-## Key Benefits
-
-### 1. **No Race Conditions**
-- Each phase completes 100% before the next begins
-- No interleaving of different data types
-- Clear dependencies are enforced
-
-### 2. **Simplified State Management**
-- Single active phase at any time
-- Clear state machine with well-defined transitions
-- Easy to reason about system state
-
-### 3. **Better Error Recovery**
-- Phase-specific recovery strategies
-- Can restart from the last known good state
-- Prevents cascading failures
-
-### 4. **Improved Debugging**
-- Phase transition logging
-- Detailed progress tracking
-- Clear error messages with phase context
-
-### 5. **Performance Optimization**
-- Better request batching within phases
-- Reduced network overhead
-- More efficient resource usage
-
-## Current Status
-
-✅ **Implemented**:
-- Complete phase state machine
-- Sequential sync manager with phase enforcement
-- Phase transition logic with validation
-- Request filtering and control
-- Progress tracking and reporting
-- Error recovery framework
-- Integration with existing sync managers
-
-⚠️ **TODO**:
-- Integration with DashSpvClient
-- Comprehensive test suite
-- Performance benchmarking
-- Documentation updates
-
-## Usage Example
-
-```rust
-// Create sequential sync manager
-let mut seq_sync = SequentialSyncManager::new(&config, received_filter_heights);
-
-// Start sync process
-seq_sync.start_sync(&mut network, &mut storage).await?;
-
-// Handle incoming messages
-match message {
-    NetworkMessage::Headers(_) => {
-        seq_sync.handle_message(message, &mut network, &mut storage).await?;
-    }
-    // ... other message types
-}
-
-// Check for timeouts periodically
-seq_sync.check_timeout(&mut network, &mut storage).await?;
-
-// Get progress
-let progress = seq_sync.get_progress();
-println!("Current phase: {}", progress.current_phase);
-```
-
-## Phase Flow Example
-
-```
-[Idle]
-    ↓
-[Downloading Headers]
-    - Request headers from genesis/checkpoint
-    - Process batches of 2000 headers
-    - Complete when empty response received
-    ↓
-[Downloading MnList] (if enabled)
-    - Request masternode list diffs
-    - Process incrementally
-    - Complete when caught up to header tip
-    ↓
-[Downloading CFHeaders] (if filters enabled)
-    - Request filter headers in batches
-    - Validate against block headers
-    - Complete when caught up to header tip
-    ↓
-[Downloading Filters]
-    - Request filters for watched addresses
-    - Check for matches
-    - Complete when all needed filters downloaded
-    ↓
-[Downloading Blocks]
-    - Request full blocks for filter matches
-    - Process transactions
-    - Complete when all blocks downloaded
-    ↓
-[Fully Synced]
-```
-
-## Next Steps
-
-1. **Integration**: Wire up SequentialSyncManager in DashSpvClient (see the sketch below)
-2. **Testing**: Create a comprehensive test suite for phase transitions
-3. **Migration**: Add a feature flag to switch between interleaved and sequential sync
-4. **Optimization**: Fine-tune batch sizes and timeouts per phase
-5. **Documentation**: Update API docs and examples
-
-The sequential sync implementation provides a solid foundation for reliable, predictable synchronization in dash-spv.
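-
-A rough sketch of what that integration could look like (illustrative only: the `DashSpvClient` field names and the `next_message` / `is_fully_synced` helpers are assumptions, not the actual API):
-
-```rust
-impl DashSpvClient {
-    /// Illustrative event loop wiring the sequential manager into the client.
-    async fn run_sequential_sync(&mut self) -> Result<()> {
-        let mut sync = SequentialSyncManager::new(&self.config, self.received_filter_heights.clone());
-        sync.start_sync(&mut self.network, &mut self.storage).await?;
-
-        while !sync.is_fully_synced() {
-            if let Some(msg) = self.network.next_message().await {
-                sync.handle_message(msg, &mut self.network, &mut self.storage).await?;
-            }
-            // Periodic stall detection for the active phase.
-            sync.check_timeout(&mut self.network, &mut self.storage).await?;
-        }
-        Ok(())
-    }
-}
-```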