From d31da81d30138f780b0965c9f59f9700c4a07d2a Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 22 Dec 2025 22:01:43 +0000 Subject: [PATCH 01/21] traits created --- dash-spv/src/storage/manager.rs | 142 ------------ dash-spv/src/storage/mod.rs | 375 +++++++++++++++++++++++--------- dash-spv/src/storage/state.rs | 165 -------------- 3 files changed, 271 insertions(+), 411 deletions(-) delete mode 100644 dash-spv/src/storage/manager.rs diff --git a/dash-spv/src/storage/manager.rs b/dash-spv/src/storage/manager.rs deleted file mode 100644 index 96cb266a..00000000 --- a/dash-spv/src/storage/manager.rs +++ /dev/null @@ -1,142 +0,0 @@ -//! Core DiskStorageManager struct and background worker implementation. - -use std::collections::HashMap; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::RwLock; - -use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, BlockHash, Txid}; - -use crate::error::{StorageError, StorageResult}; -use crate::storage::headers::load_block_index; -use crate::storage::segments::SegmentCache; -use crate::types::{MempoolState, UnconfirmedTransaction}; - -use super::lockfile::LockFile; - -/// Disk-based storage manager with segmented files and async background saving. -pub struct DiskStorageManager { - pub(super) base_path: PathBuf, - - // Segmented header storage - pub(super) block_headers: Arc>>, - pub(super) filter_headers: Arc>>, - pub(super) filters: Arc>>>, - - // Reverse index for O(1) lookups - pub(super) header_hash_index: Arc>>, - - // Background worker - pub(super) worker_handle: Option>, - - // Mempool storage - pub(super) mempool_transactions: Arc>>, - pub(super) mempool_state: Arc>>, - - // Lock file to prevent concurrent access from multiple processes. - _lock_file: LockFile, -} - -impl DiskStorageManager { - pub async fn new(base_path: PathBuf) -> StorageResult { - use std::fs; - - // Create directories if they don't exist - fs::create_dir_all(&base_path) - .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; - - // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(base_path.join(".lock"))?; - - let headers_dir = base_path.join("headers"); - let filters_dir = base_path.join("filters"); - let state_dir = base_path.join("state"); - - fs::create_dir_all(&headers_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create headers directory: {}", e)) - })?; - fs::create_dir_all(&filters_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create filters directory: {}", e)) - })?; - fs::create_dir_all(&state_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create state directory: {}", e)) - })?; - - let mut storage = Self { - base_path: base_path.clone(), - block_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, - )), - filter_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, - )), - filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)), - header_hash_index: Arc::new(RwLock::new(HashMap::new())), - worker_handle: None, - mempool_transactions: Arc::new(RwLock::new(HashMap::new())), - mempool_state: Arc::new(RwLock::new(None)), - _lock_file: lock_file, - }; - - // Load chain state to get sync_base_height - if let Ok(Some(state)) = storage.load_chain_state().await { - tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height); - } - - // Start background worker that - // persists data when 
appropriate - storage.start_worker().await; - - // Rebuild index - let block_index = match load_block_index(&storage).await { - Ok(index) => index, - Err(e) => { - tracing::error!( - "An unexpected IO or deserialization error didn't allow the block index to be built: {}", - e - ); - HashMap::new() - } - }; - storage.header_hash_index = Arc::new(RwLock::new(block_index)); - - Ok(storage) - } - - #[cfg(test)] - pub async fn with_temp_dir() -> StorageResult { - use tempfile::TempDir; - - let temp_dir = TempDir::new()?; - Self::new(temp_dir.path().into()).await - } - - /// Start the background worker - pub(super) async fn start_worker(&mut self) { - let block_headers = Arc::clone(&self.block_headers); - let filter_headers = Arc::clone(&self.filter_headers); - let filters = Arc::clone(&self.filters); - - let worker_handle = tokio::spawn(async move { - let mut ticker = tokio::time::interval(Duration::from_secs(5)); - - loop { - ticker.tick().await; - - block_headers.write().await.persist_evicted().await; - filter_headers.write().await.persist_evicted().await; - filters.write().await.persist_evicted().await; - } - }); - - self.worker_handle = Some(worker_handle); - } - - /// Stop the background worker without forcing a save. - pub(super) fn stop_worker(&mut self) { - if let Some(handle) = self.worker_handle.take() { - handle.abort(); - } - } -} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index baae5862..d954c5c6 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -6,7 +6,6 @@ pub mod types; mod headers; mod lockfile; -mod manager; mod segments; mod state; @@ -19,58 +18,237 @@ use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, Txid}; use crate::error::StorageResult; use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; -pub use manager::DiskStorageManager; pub use types::*; -/// Storage manager trait for abstracting data persistence. -/// -/// # Thread Safety -/// -/// This trait requires `Send + Sync` bounds to ensure thread safety, but uses `&mut self` -/// for mutation methods. This design choice provides several benefits: -/// -/// 1. **Simplified Implementation**: Storage backends don't need to implement interior -/// mutability patterns (like `Arc>` or `RwLock`) internally. -/// -/// 2. **Performance**: Avoids unnecessary locking overhead when the storage manager -/// is already protected by external synchronization. -/// -/// 3. **Flexibility**: Callers can choose the appropriate synchronization strategy -/// based on their specific use case (e.g., single-threaded, mutex-protected, etc.). 
-///
-/// ## Usage Pattern
-///
-/// The typical usage pattern wraps the storage manager in an `Arc<Mutex<T>>` or similar:
-///
-/// ```rust,no_run
-/// # use std::sync::Arc;
-/// # use tokio::sync::Mutex;
-/// # use dash_spv::storage::DiskStorageManager;
-/// # use dashcore::blockdata::block::Header as BlockHeader;
-/// #
-/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
-/// let storage: Arc<Mutex<DiskStorageManager>> = Arc::new(Mutex::new(DiskStorageManager::new("./.tmp/example-storage".into()).await?));
-/// let headers: Vec<BlockHeader> = vec![]; // Your headers here
-///
-/// // In async context:
-/// let mut guard = storage.lock().await;
-/// guard.store_headers(&headers).await?;
-/// # Ok(())
-/// # }
-/// ```
-///
-/// ## Implementation Requirements
-///
-/// Implementations must ensure that:
-/// - All operations are atomic at the logical level (e.g., all headers in a batch succeed or fail together)
-/// - Read operations are consistent (no partial reads of in-progress writes)
-/// - The implementation is safe to move between threads (`Send`)
-/// - The implementation can be referenced from multiple threads (`Sync`)
-///
-/// Note that the `&mut self` requirement means only one thread can be mutating the storage
-/// at a time when using external synchronization, which naturally provides consistency.
 #[async_trait]
-pub trait StorageManager: Send + Sync + 'static {
+pub trait StorageManager:
+ BlockHeaderStorage
+ + FilterHeaderStorage
+ + FilterStorage
+ + TransactionStorage
+ + MempoolStateStorage
+ + MetadataStorage
+ + ChainStateStorage
+ + MasternodeStateStorage
+ + Send
+ + Sync
+{
+}
+
+/// Disk-based storage manager with segmented files and async background saving.
+pub struct DiskStorageManager {
+ pub(super) base_path: PathBuf,
+
+ // Segmented header storage
+ pub(super) block_headers: Arc<RwLock<SegmentCache<BlockHeader>>>,
+ pub(super) filter_headers: Arc<RwLock<SegmentCache<FilterHeader>>>,
+ pub(super) filters: Arc<RwLock<SegmentCache<Vec<u8>>>>,
+
+ // Reverse index for O(1) lookups
+ pub(super) header_hash_index: Arc<RwLock<HashMap<BlockHash, u32>>>,
+
+ // Background worker
+ pub(super) worker_tx: Option<tokio::sync::mpsc::Sender<WorkerCommand>>,
+ pub(super) worker_handle: Option<tokio::task::JoinHandle<()>>,
+
+ // Index save tracking to avoid redundant saves
+ pub(super) last_index_save_count: Arc<RwLock<usize>>,
+
+ // Mempool storage
+ pub(super) mempool_transactions: Arc<RwLock<HashMap<Txid, UnconfirmedTransaction>>>,
+ pub(super) mempool_state: Arc<RwLock<Option<MempoolState>>>,
+
+ // Lock file to prevent concurrent access from multiple processes.
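+ // Acquired in new(); the exclusive lock is released when the manager is dropped.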
+ _lock_file: LockFile,
+}
+
+impl DiskStorageManager {
+ pub async fn new(base_path: PathBuf) -> StorageResult<Self> {
+ use std::fs;
+
+ // Create directories if they don't exist
+ fs::create_dir_all(&base_path)
+ .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?;
+
+ // Acquire exclusive lock on the data directory
+ let lock_file = LockFile::new(base_path.join(".lock"))?;
+
+ let headers_dir = base_path.join("headers");
+ let filters_dir = base_path.join("filters");
+ let state_dir = base_path.join("state");
+
+ fs::create_dir_all(&headers_dir).map_err(|e| {
+ StorageError::WriteFailed(format!("Failed to create headers directory: {}", e))
+ })?;
+ fs::create_dir_all(&filters_dir).map_err(|e| {
+ StorageError::WriteFailed(format!("Failed to create filters directory: {}", e))
+ })?;
+ fs::create_dir_all(&state_dir).map_err(|e| {
+ StorageError::WriteFailed(format!("Failed to create state directory: {}", e))
+ })?;
+
+ let mut storage = Self {
+ base_path: base_path.clone(),
+ block_headers: Arc::new(RwLock::new(
+ SegmentCache::load_or_new(base_path.clone()).await?,
+ )),
+ filter_headers: Arc::new(RwLock::new(
+ SegmentCache::load_or_new(base_path.clone()).await?,
+ )),
+ filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)),
+ header_hash_index: Arc::new(RwLock::new(HashMap::new())),
+ worker_tx: None,
+ worker_handle: None,
+ last_index_save_count: Arc::new(RwLock::new(0)),
+ mempool_transactions: Arc::new(RwLock::new(HashMap::new())),
+ mempool_state: Arc::new(RwLock::new(None)),
+ _lock_file: lock_file,
+ };
+
+ // Load chain state to get sync_base_height
+ if let Ok(Some(state)) = storage.load_chain_state().await {
+ tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height);
+ }
+
+ // Start background worker
+ storage.start_worker().await;
+
+ // Rebuild index
+ let block_index = match load_block_index(&storage).await {
+ Ok(index) => index,
+ Err(e) => {
+ tracing::error!(
+ "Failed to rebuild the block index due to an unexpected IO or deserialization error: {}",
+ e
+ );
+ HashMap::new()
+ }
+ };
+ storage.header_hash_index = Arc::new(RwLock::new(block_index));
+
+ Ok(storage)
+ }
+
+ #[cfg(test)]
+ pub async fn with_temp_dir() -> StorageResult<Self> {
+ use tempfile::TempDir;
+
+ let temp_dir = TempDir::new()?;
+ Self::new(temp_dir.path().into()).await
+ }
+
+ /// Start the background worker
+ pub(super) async fn start_worker(&mut self) {
+ let block_headers = Arc::clone(&self.block_headers);
+ let filter_headers = Arc::clone(&self.filter_headers);
+ let filters = Arc::clone(&self.filters);
+
+ let worker_handle = tokio::spawn(async move {
+ let mut ticker = tokio::time::interval(Duration::from_secs(5));
+
+ loop {
+ ticker.tick().await;
+
+ block_headers.write().await.persist_evicted().await;
+ filter_headers.write().await.persist_evicted().await;
+ filters.write().await.persist_evicted().await;
+ }
+ });
+
+ self.worker_handle = Some(worker_handle);
+ }
+
+ /// Stop the background worker without forcing a save.
+ pub(super) fn stop_worker(&mut self) {
+ if let Some(handle) = self.worker_handle.take() {
+ handle.abort();
+ }
+ }
+
+ /// Clear all filter headers and compact filters.
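+ ///
+ /// Stops the background worker first so its 5-second persistence tick cannot
+ /// write to the filter directories mid-clear, then restarts it afterwards.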
+ pub(super) async fn clear_filters(&mut self) -> StorageResult<()> {
+ // Stop worker to prevent concurrent writes to filter directories
+ self.stop_worker();
+
+ // Clear in-memory and on-disk filter headers segments
+ self.filter_headers.write().await.clear_all().await?;
+ self.filters.write().await.clear_all().await?;
+
+ // Restart background worker for future operations
+ self.start_worker().await;
+
+ Ok(())
+ }
+
+ /// Clear all storage.
+ pub async fn clear(&mut self) -> StorageResult<()> {
+ // First, stop the background worker to avoid races with file deletion
+ self.stop_worker();
+
+ // Clear in-memory state
+ self.block_headers.write().await.clear_in_memory();
+ self.filter_headers.write().await.clear_in_memory();
+ self.filters.write().await.clear_in_memory();
+
+ self.header_hash_index.write().await.clear();
+ self.mempool_transactions.write().await.clear();
+ *self.mempool_state.write().await = None;
+
+ // Remove all files and directories under base_path
+ if self.base_path.exists() {
+ // Best-effort removal; if concurrent files appear, retry once
+ match tokio::fs::remove_dir_all(&self.base_path).await {
+ Ok(_) => {}
+ Err(e) => {
+ // Retry once after a short delay to handle transient races
+ if e.kind() == std::io::ErrorKind::Other
+ || e.kind() == std::io::ErrorKind::DirectoryNotEmpty
+ {
+ tokio::time::sleep(std::time::Duration::from_millis(50)).await;
+ tokio::fs::remove_dir_all(&self.base_path).await?;
+ } else {
+ return Err(crate::error::StorageError::Io(e));
+ }
+ }
+ }
+ tokio::fs::create_dir_all(&self.base_path).await?;
+ }
+
+ // Recreate expected subdirectories
+ tokio::fs::create_dir_all(self.base_path.join("headers")).await?;
+ tokio::fs::create_dir_all(self.base_path.join("filters")).await?;
+ tokio::fs::create_dir_all(self.base_path.join("state")).await?;
+
+ // Restart the background worker for future operations
+ self.start_worker().await;
+
+ Ok(())
+ }
+
+ /// Shutdown the storage manager.
+ pub async fn shutdown(&mut self) {
+ self.stop_worker();
+
+ // Persist all dirty data
+ self.save_dirty().await;
+ }
+
+ /// Save all dirty data.
+ pub(super) async fn save_dirty(&self) {
+ self.filter_headers.write().await.persist().await;
+ self.block_headers.write().await.persist().await;
+ self.filters.write().await.persist().await;
+
+ let path = self.base_path.join("headers/index.dat");
+ let index = self.header_hash_index.read().await;
+ if let Err(e) = save_index_to_disk(&path, &index).await {
+ tracing::error!("Failed to persist header index: {}", e);
+ }
+ }
+}
+
+#[async_trait]
+pub trait BlockHeaderStorage {
 /// Store block headers.
 async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>;
@@ -87,6 +265,15 @@ pub trait StorageManager: Send + Sync + 'static {
 async fn get_stored_headers_len(&self) -> u32;
 
+ /// Get header height by block hash (reverse lookup).
+ async fn get_header_height_by_hash(
+ &self,
+ hash: &dashcore::BlockHash,
+ ) -> StorageResult<Option<u32>>;
+}
+
+#[async_trait]
+pub trait FilterHeaderStorage {
 /// Store filter headers.
 async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>;
@@ -98,63 +285,19 @@ pub trait StorageManager: Send + Sync + 'static {
 /// Get the current filter tip blockchain height.
 async fn get_filter_tip_height(&self) -> StorageResult<Option<u32>>;
+}
-
- /// Store masternode state.
- async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>;
-
- /// Load masternode state.
- async fn load_masternode_state(&self) -> StorageResult>; - - /// Store chain state. - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; - - /// Load chain state. - async fn load_chain_state(&self) -> StorageResult>; - +#[async_trait] +pub trait FilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; /// Load compact filters in the given blockchain height range. async fn load_filters(&self, range: Range) -> StorageResult>>; +} - /// Store metadata. - async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; - - /// Load metadata. - async fn load_metadata(&self, key: &str) -> StorageResult>>; - - /// Clear all data. - async fn clear(&mut self) -> StorageResult<()>; - - /// Clear all filter headers and compact filters. - async fn clear_filters(&mut self) -> StorageResult<()>; - - /// Get header height by block hash (reverse lookup). - async fn get_header_height_by_hash( - &self, - hash: &dashcore::BlockHash, - ) -> StorageResult>; - - // UTXO methods removed - handled by external wallet - - /// Store a chain lock. - async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()>; - - /// Load a chain lock by height. - async fn load_chain_lock(&self, height: u32) -> StorageResult>; - - /// Get chain locks in a height range. - async fn get_chain_locks( - &self, - start_height: u32, - end_height: u32, - ) -> StorageResult>; - - // Mempool storage methods +#[async_trait] +pub trait TransactionStorage { /// Store an unconfirmed transaction. async fn store_mempool_transaction( &mut self, @@ -175,16 +318,40 @@ pub trait StorageManager: Send + Sync + 'static { async fn get_all_mempool_transactions( &self, ) -> StorageResult>; +} +#[async_trait] +pub trait MempoolStateStorage { /// Store the complete mempool state. async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; /// Load the mempool state. async fn load_mempool_state(&self) -> StorageResult>; +} - /// Clear all mempool data. - async fn clear_mempool(&mut self) -> StorageResult<()>; +#[async_trait] +pub trait MetadataStorage { + /// Store metadata. + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; + + /// Load metadata. + async fn load_metadata(&self, key: &str) -> StorageResult>>; +} + +#[async_trait] +pub trait ChainStateStorage { + /// Store chain state. + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; - /// Shutdown the storage manager - async fn shutdown(&mut self) -> StorageResult<()>; + /// Load chain state. + async fn load_chain_state(&self) -> StorageResult>; +} + +#[async_trait] +pub trait MasternodeStateStorage { + /// Store masternode state. + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; + + /// Load masternode state. + async fn load_masternode_state(&self) -> StorageResult>; } diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 31f5fdda..b46fc9bc 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -104,84 +104,6 @@ impl DiskStorageManager { Ok(Some(state)) } - /// Store a ChainLock. 
- pub async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()> { - let path = self.base_path.join("chainlocks").join(format!("chainlock_{:08}.bin", height)); - let data = bincode::serialize(chain_lock).map_err(|e| { - crate::error::StorageError::WriteFailed(format!( - "Failed to serialize chain lock: {}", - e - )) - })?; - - atomic_write(&path, &data).await?; - tracing::debug!("Stored chain lock at height {}", height); - Ok(()) - } - - /// Load a ChainLock. - pub async fn load_chain_lock(&self, height: u32) -> StorageResult> { - let path = self.base_path.join("chainlocks").join(format!("chainlock_{:08}.bin", height)); - - if !path.exists() { - return Ok(None); - } - - let data = tokio::fs::read(&path).await?; - let chain_lock = bincode::deserialize(&data).map_err(|e| { - crate::error::StorageError::ReadFailed(format!( - "Failed to deserialize chain lock: {}", - e - )) - })?; - - Ok(Some(chain_lock)) - } - - /// Get ChainLocks in a height range. - pub async fn get_chain_locks( - &self, - start_height: u32, - end_height: u32, - ) -> StorageResult> { - let chainlocks_dir = self.base_path.join("chainlocks"); - - if !chainlocks_dir.exists() { - return Ok(Vec::new()); - } - - let mut chain_locks = Vec::new(); - let mut entries = tokio::fs::read_dir(&chainlocks_dir).await?; - - while let Some(entry) = entries.next_entry().await? { - let file_name = entry.file_name(); - let file_name_str = file_name.to_string_lossy(); - - // Parse height from filename - if let Some(height_str) = - file_name_str.strip_prefix("chainlock_").and_then(|s| s.strip_suffix(".bin")) - { - if let Ok(height) = height_str.parse::() { - if height >= start_height && height <= end_height { - let path = entry.path(); - let data = tokio::fs::read(&path).await?; - if let Ok(chain_lock) = bincode::deserialize(&data) { - chain_locks.push((height, chain_lock)); - } - } - } - } - } - - // Sort by height - chain_locks.sort_by_key(|(h, _)| *h); - Ok(chain_locks) - } - /// Store metadata. pub async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { let path = self.base_path.join(format!("state/{}.dat", key)); @@ -199,72 +121,6 @@ impl DiskStorageManager { let data = tokio::fs::read(path).await?; Ok(Some(data)) } - - /// Clear all storage. 
- pub async fn clear(&mut self) -> StorageResult<()> { - // First, stop the background worker to avoid races with file deletion - self.stop_worker(); - - // Clear in-memory state - self.block_headers.write().await.clear_in_memory(); - self.filter_headers.write().await.clear_in_memory(); - self.filters.write().await.clear_in_memory(); - - self.header_hash_index.write().await.clear(); - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - - // Remove all files and directories under base_path - if self.base_path.exists() { - // Best-effort removal; if concurrent files appear, retry once - match tokio::fs::remove_dir_all(&self.base_path).await { - Ok(_) => {} - Err(e) => { - // Retry once after a short delay to handle transient races - if e.kind() == std::io::ErrorKind::Other - || e.kind() == std::io::ErrorKind::DirectoryNotEmpty - { - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.base_path).await?; - } else { - return Err(crate::error::StorageError::Io(e)); - } - } - } - tokio::fs::create_dir_all(&self.base_path).await?; - } - - // Recreate expected subdirectories - tokio::fs::create_dir_all(self.base_path.join("headers")).await?; - tokio::fs::create_dir_all(self.base_path.join("filters")).await?; - tokio::fs::create_dir_all(self.base_path.join("state")).await?; - - // Restart the background worker for future operations - self.start_worker().await; - - Ok(()) - } - - /// Shutdown the storage manager. - pub async fn shutdown(&mut self) { - self.stop_worker(); - - // Persist all dirty data - self.save_dirty().await; - } - - /// Save all dirty data. - pub(super) async fn save_dirty(&self) { - self.filter_headers.write().await.persist().await; - self.block_headers.write().await.persist().await; - self.filters.write().await.persist().await; - - let path = self.base_path.join("headers/index.dat"); - let index = self.header_hash_index.read().await; - if let Err(e) = save_index_to_disk(&path, &index).await { - tracing::error!("Failed to persist header index: {}", e); - } - } } /// Mempool storage methods @@ -310,13 +166,6 @@ impl DiskStorageManager { pub async fn load_mempool_state(&self) -> StorageResult> { Ok(self.mempool_state.read().await.clone()) } - - /// Clear mempool. 
- pub async fn clear_mempool(&mut self) -> StorageResult<()> { - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - Ok(()) - } } #[async_trait] @@ -427,20 +276,6 @@ impl StorageManager for DiskStorageManager { Self::clear(self).await } - async fn clear_filters(&mut self) -> StorageResult<()> { - // Stop worker to prevent concurrent writes to filter directories - self.stop_worker(); - - // Clear in-memory and on-disk filter headers segments - self.filter_headers.write().await.clear_all().await?; - self.filters.write().await.clear_all().await?; - - // Restart background worker for future operations - self.start_worker().await; - - Ok(()) - } - async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { Self::get_header_height_by_hash(self, hash).await } From 3ee9369d7e601ce31e99b775a14de8b734fb395d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 16 Dec 2025 21:16:17 +0000 Subject: [PATCH 02/21] the disk storage manager worker is now a time based check, removed old command communciation --- dash-spv/src/storage/segments.rs | 1 - dash-spv/src/storage/state.rs | 175 ------------------------------- 2 files changed, 176 deletions(-) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index c33c669d..14c29fb0 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -354,7 +354,6 @@ impl SegmentCache { height += 1; } - // Update cached tip height with blockchain height self.tip_height = match self.tip_height { Some(current) => Some(current.max(height - 1)), None => Some(height - 1), diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index b46fc9bc..92229536 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -168,181 +168,6 @@ impl DiskStorageManager { } } -#[async_trait] -impl StorageManager for DiskStorageManager { - async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - self.store_headers(headers).await - } - - async fn load_headers(&self, range: std::ops::Range) -> StorageResult> { - self.block_headers.write().await.get_items(range).await - } - - async fn get_header(&self, height: u32) -> StorageResult> { - if self.get_tip_height().await.is_none_or(|tip_height| height > tip_height) { - return Ok(None); - } - - if self.get_start_height().await.is_none_or(|start_height| height < start_height) { - return Ok(None); - } - - Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - - async fn get_tip_height(&self) -> Option { - self.block_headers.read().await.tip_height() - } - - async fn get_start_height(&self) -> Option { - self.block_headers.read().await.start_height() - } - - async fn get_stored_headers_len(&self) -> u32 { - let headers_guard = self.block_headers.read().await; - let start_height = if let Some(start_height) = headers_guard.start_height() { - start_height - } else { - return 0; - }; - - let end_height = if let Some(end_height) = headers_guard.tip_height() { - end_height - } else { - return 0; - }; - - end_height - start_height + 1 - } - - async fn store_filter_headers( - &mut self, - headers: &[dashcore::hash_types::FilterHeader], - ) -> StorageResult<()> { - self.filter_headers.write().await.store_items(headers).await - } - - async fn load_filter_headers( - &self, - range: std::ops::Range, - ) -> StorageResult> { - self.filter_headers.write().await.get_items(range).await - } - - async fn get_filter_header( - &self, - height: 
u32, - ) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - - async fn get_filter_tip_height(&self) -> StorageResult> { - Ok(self.filter_headers.read().await.tip_height()) - } - - async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { - Self::store_masternode_state(self, state).await - } - - async fn load_masternode_state(&self) -> StorageResult> { - Self::load_masternode_state(self).await - } - - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - Self::store_chain_state(self, state).await - } - - async fn load_chain_state(&self) -> StorageResult> { - Self::load_chain_state(self).await - } - - async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await - } - - async fn load_filters(&self, range: std::ops::Range) -> StorageResult>> { - self.filters.write().await.get_items(range).await - } - - async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { - Self::store_metadata(self, key, value).await - } - - async fn load_metadata(&self, key: &str) -> StorageResult>> { - Self::load_metadata(self, key).await - } - - async fn clear(&mut self) -> StorageResult<()> { - Self::clear(self).await - } - - async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { - Self::get_header_height_by_hash(self, hash).await - } - - async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()> { - Self::store_chain_lock(self, height, chain_lock).await - } - - async fn load_chain_lock(&self, height: u32) -> StorageResult> { - Self::load_chain_lock(self, height).await - } - - async fn get_chain_locks( - &self, - start_height: u32, - end_height: u32, - ) -> StorageResult> { - Self::get_chain_locks(self, start_height, end_height).await - } - - async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()> { - Self::store_mempool_transaction(self, txid, tx).await - } - - async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - Self::remove_mempool_transaction(self, txid).await - } - - async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult> { - Self::get_mempool_transaction(self, txid).await - } - - async fn get_all_mempool_transactions( - &self, - ) -> StorageResult> { - Self::get_all_mempool_transactions(self).await - } - - async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - Self::store_mempool_state(self, state).await - } - - async fn load_mempool_state(&self) -> StorageResult> { - Self::load_mempool_state(self).await - } - - async fn clear_mempool(&mut self) -> StorageResult<()> { - Self::clear_mempool(self).await - } - - async fn shutdown(&mut self) -> StorageResult<()> { - Self::shutdown(self).await; - Ok(()) - } -} - #[cfg(test)] mod tests { use super::*; From b80fb2118c52108975512646dc4d362cb2bd371d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 16 Dec 2025 21:36:28 +0000 Subject: [PATCH 03/21] tests updated --- dash-spv/src/storage/segments.rs | 33 +++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 14c29fb0..25118c4e 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ 
-628,7 +628,7 @@ mod tests { .await .expect("Failed to create new segment_cache"); - cache.store_items_at_height(&items, 10).await.expect("Failed to store items"); + cache.store_items(&items).await.expect("Failed to store items"); cache.persist().await; @@ -636,10 +636,10 @@ mod tests { assert!(cache.segments.is_empty()); assert!(cache.evicted.is_empty()); - assert_eq!( - cache.get_items(10..20).await.expect("Failed to retrieve get irems from segment cache"), - items - ); + let recovered_items = cache.get_items(0..10).await.expect("Failed to load items"); + + assert_eq!(recovered_items, items); + assert_eq!(cache.segments.len(), 1); cache.clear_all().await.expect("Failed to clean on-memory and on-disk data"); assert!(cache.segments.is_empty()); @@ -677,18 +677,25 @@ mod tests { cache.get_items(0..ITEMS_PER_SEGMENT - 1).await.expect("Failed to get items") ); + let items: Vec<_> = (0..ITEMS_PER_SEGMENT * 2 + ITEMS_PER_SEGMENT / 2) + .map(FilterHeader::new_test) + .collect(); + + cache.store_items(&items).await.expect("Failed to store items"); + assert_eq!( - items[0..(ITEMS_PER_SEGMENT + 1) as usize], - cache.get_items(0..ITEMS_PER_SEGMENT + 1).await.expect("Failed to get items") + items[0..ITEMS_PER_SEGMENT as usize], + cache.get_items(0..ITEMS_PER_SEGMENT).await.expect("Failed to get items") + ); + + assert_eq!( + items[0..(ITEMS_PER_SEGMENT - 1) as usize], + cache.get_items(0..ITEMS_PER_SEGMENT - 1).await.expect("Failed to get items") ); assert_eq!( - items[(ITEMS_PER_SEGMENT - 1) as usize - ..(ITEMS_PER_SEGMENT * 2 + ITEMS_PER_SEGMENT / 2) as usize], - cache - .get_items(ITEMS_PER_SEGMENT - 1..ITEMS_PER_SEGMENT * 2 + ITEMS_PER_SEGMENT / 2) - .await - .expect("Failed to get items") + items[0..(ITEMS_PER_SEGMENT + 1) as usize], + cache.get_items(0..ITEMS_PER_SEGMENT + 1).await.expect("Failed to get items") ); } From 27f7da22f127eab8bae9bc172c80d78d7f1581df Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:11:54 +0000 Subject: [PATCH 04/21] replaced header_at_height --- dash-spv/src/chain/chainlock_manager.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs index 84b1db5a..bc3a8be9 100644 --- a/dash-spv/src/chain/chainlock_manager.rs +++ b/dash-spv/src/chain/chainlock_manager.rs @@ -178,7 +178,11 @@ impl ChainLockManager { if let Some(header) = storage .get_header(chain_lock.block_height) .await +<<<<<<< HEAD .map_err(ValidationError::StorageError)? +======= + .map_err(|e| ValidationError::StorageError(e))? 
+>>>>>>> eb32b7bc (replaced header_at_height)
 {
 let header_hash = header.block_hash();
 if header_hash != chain_lock.block_hash {

From 8a3496387ef7cca90ba079b7cfa2b27c92f22079 Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Thu, 18 Dec 2025 21:14:51 +0000
Subject: [PATCH 05/21] removed unused methods

---
 dash-spv/src/types.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs
index 47dff94c..482c9055 100644
--- a/dash-spv/src/types.rs
+++ b/dash-spv/src/types.rs
@@ -367,6 +367,10 @@ impl ChainState {
 impl std::fmt::Debug for ChainState {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 f.debug_struct("ChainState")
+<<<<<<< HEAD
+=======
+ .field("filter_headers", &format!("{} filter headers", self.filter_headers.len()))
+>>>>>>> 2bf3a91a (removed unused methods)
 .field("last_chainlock_height", &self.last_chainlock_height)
 .field("last_chainlock_hash", &self.last_chainlock_hash)
 .field("current_filter_tip", &self.current_filter_tip)

From 861f63d7e7c40c6bd57eb468afd6b5581e1ab489 Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Thu, 18 Dec 2025 21:24:39 +0000
Subject: [PATCH 06/21] init_from_checkpoint sync

---
 dash-spv/src/types.rs | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs
index 482c9055..47dff94c 100644
--- a/dash-spv/src/types.rs
+++ b/dash-spv/src/types.rs
@@ -367,10 +367,6 @@ impl ChainState {
 impl std::fmt::Debug for ChainState {
 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
 f.debug_struct("ChainState")
-<<<<<<< HEAD
-=======
- .field("filter_headers", &format!("{} filter headers", self.filter_headers.len()))
->>>>>>> 2bf3a91a (removed unused methods)
 .field("last_chainlock_height", &self.last_chainlock_height)
 .field("last_chainlock_hash", &self.last_chainlock_hash)
 .field("current_filter_tip", &self.current_filter_tip)

From f42976385baee14a8647fc36054f4e98511a7c1d Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Thu, 18 Dec 2025 21:39:24 +0000
Subject: [PATCH 07/21] removed two methods that were involved in the same process

---
 dash-spv/src/sync/headers/manager.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs
index 1faf92c3..30d46b10 100644
--- a/dash-spv/src/sync/headers/manager.rs
+++ b/dash-spv/src/sync/headers/manager.rs
@@ -214,6 +214,7 @@ impl HeaderSyncManager {
 
 // Step 3: Process the Entire Validated Batch
 
+<<<<<<< HEAD
 // Checkpoint Validation: Perform in-memory security check against checkpoints
 for (index, cached_header) in cached_headers.iter().enumerate() {
 let prospective_height = tip_height + (index as u32) + 1;
@@ -230,6 +231,8 @@ impl HeaderSyncManager {
 }
 }
 
+=======
+>>>>>>> 7acccc0b (removed two methods that were involved in the same process)
 storage
 .store_headers(headers)
 .await

From eb3487b78fd411a232a37079ca56de13e4c2caee Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Fri, 19 Dec 2025 18:53:26 +0000
Subject: [PATCH 08/21] fixed clippy warnings

---
 dash-spv/src/chain/chainlock_manager.rs | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs
index bc3a8be9..84b1db5a 100644
--- a/dash-spv/src/chain/chainlock_manager.rs
+++ b/dash-spv/src/chain/chainlock_manager.rs
@@ -178,11 +178,7 @@ impl ChainLockManager {
 if let Some(header) = storage
 .get_header(chain_lock.block_height)
 .await
-<<<<<<< HEAD
 .map_err(ValidationError::StorageError)?
-=======
- .map_err(|e| ValidationError::StorageError(e))?
->>>>>>> eb32b7bc (replaced header_at_height)
 {
 let header_hash = header.block_hash();
 if header_hash != chain_lock.block_hash {

From 31f9938d50a8d2494b1fa37aa355b69d450b2a42 Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Fri, 19 Dec 2025 19:50:14 +0000
Subject: [PATCH 09/21] dropped unused code

---
 dash-spv/src/sync/headers/manager.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs
index 30d46b10..ef1b96e3 100644
--- a/dash-spv/src/sync/headers/manager.rs
+++ b/dash-spv/src/sync/headers/manager.rs
@@ -49,7 +49,6 @@ pub struct HeaderSyncManager {
 config: ClientConfig,
 tip_manager: ChainTipManager,
 checkpoint_manager: CheckpointManager,
- reorg_config: ReorgConfig,
 chain_state: Arc<RwLock<ChainState>>,
 // WalletState removed - wallet functionality is now handled externally
 headers2_state: Headers2StateManager,
@@ -81,7 +80,6 @@ impl HeaderSyncManager {
 config: config.clone(),
 tip_manager: ChainTipManager::new(reorg_config.max_forks),
 checkpoint_manager,
- reorg_config,
 chain_state,
 // WalletState removed
 headers2_state: Headers2StateManager::new(),

From f60fc005e369a455cd0c2d809f5411d32cc21c9f Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Tue, 23 Dec 2025 20:26:23 +0000
Subject: [PATCH 10/21] everything moved where I want it to be

---
 dash-spv/src/storage/blocks.rs | 176 ++++++++++++++
 dash-spv/src/storage/chainstate.rs | 87 +++++++
 dash-spv/src/storage/filters.rs | 102 +++++++++
 dash-spv/src/storage/headers.rs | 74 ------
 dash-spv/src/storage/masternode.rs | 59 +++++
 dash-spv/src/storage/metadata.rs | 45 ++++
 dash-spv/src/storage/mod.rs | 330 ++++++++++++++++-----------
 dash-spv/src/storage/transactions.rs | 104 +++++++++
 8 files changed, 766 insertions(+), 211 deletions(-)
 create mode 100644 dash-spv/src/storage/blocks.rs
 create mode 100644 dash-spv/src/storage/chainstate.rs
 create mode 100644 dash-spv/src/storage/filters.rs
 delete mode 100644 dash-spv/src/storage/headers.rs
 create mode 100644 dash-spv/src/storage/masternode.rs
 create mode 100644 dash-spv/src/storage/metadata.rs
 create mode 100644 dash-spv/src/storage/transactions.rs

diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs
new file mode 100644
index 00000000..826cbe94
--- /dev/null
+++ b/dash-spv/src/storage/blocks.rs
@@ -0,0 +1,176 @@
+//! Header storage operations for DiskStorageManager.
+
+use std::collections::HashMap;
+use std::ops::Range;
+use std::path::Path;
+use std::sync::Arc;
+
+use async_trait::async_trait;
+use dashcore::block::Header as BlockHeader;
+use dashcore::BlockHash;
+use tokio::sync::RwLock;
+
+use crate::error::StorageResult;
+use crate::storage::io::atomic_write;
+use crate::storage::segments::SegmentCache;
+use crate::storage::PersistentStorage;
+use crate::StorageError;
+
+#[async_trait]
+pub trait BlockHeaderStorage {
+ /// Store block headers at the current tip.
+ async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>;
+
+ /// Store block headers starting at the given blockchain height.
+ async fn store_headers_at_height(
+ &mut self,
+ headers: &[BlockHeader],
+ height: u32,
+ ) -> StorageResult<()>;
+
+ /// Load block headers in the given range.
+ async fn load_headers(&self, range: Range<u32>) -> StorageResult<Vec<BlockHeader>>;
+
+ /// Get a specific header by blockchain height.
+ async fn get_header(&self, height: u32) -> StorageResult<Option<BlockHeader>>;
+
+ /// Get the current tip blockchain height.
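+ ///
+ /// Returns `None` when no headers have been stored yet. A minimal usage
+ /// sketch (illustrative only, for any `BlockHeaderStorage` implementation):
+ ///
+ /// ```rust,ignore
+ /// let next_height = storage.get_tip_height().await.map_or(0, |tip| tip + 1);
+ /// ```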
+ async fn get_tip_height(&self) -> Option<u32>;
+
+ async fn get_start_height(&self) -> Option<u32>;
+
+ async fn get_stored_headers_len(&self) -> u32;
+
+ /// Get header height by block hash (reverse lookup).
+ async fn get_header_height_by_hash(
+ &self,
+ hash: &dashcore::BlockHash,
+ ) -> StorageResult<Option<u32>>;
+}
+
+pub struct PersistentBlockHeaderStorage {
+ block_headers: Arc<RwLock<SegmentCache<BlockHeader>>>,
+ header_hash_index: Arc<RwLock<HashMap<BlockHash, u32>>>,
+}
+
+#[async_trait]
+impl PersistentStorage for PersistentBlockHeaderStorage {
+ async fn load(&self) -> StorageResult<Self> {
+ let index_path = self.base_path.join("headers/index.dat");
+
+ let block_headers = SegmentCache::load_or_new(base_path).await;
+
+ let header_hash_index = if let Ok(index) =
+ tokio::fs::read(&index_path).await.and_then(|content| bincode::deserialize(&content))
+ {
+ index
+ } else {
+ block_headers.build_block_index_from_segments().await
+ };
+
+ let block_headers = Arc::new(RwLock::new(block_headers));
+ let header_hash_index = Arc::new(RwLock::new(header_hash_index));
+
+ Ok(Self {
+ block_headers,
+ header_hash_index,
+ })
+ }
+
+ async fn persist(&self) {
+ let index_path = self.base_path.join("headers/index.dat");
+
+ self.block_headers.write().await.persist().await;
+
+ let data = bincode::serialize(&self.header_hash_index.read().await)
+ .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?;
+
+ atomic_write(&index_path, &data).await
+ }
+}
+
+#[async_trait]
+impl BlockHeaderStorage for PersistentBlockHeaderStorage {
+ async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> {
+ let height = self.block_headers.read().await.next_height();
+ self.store_headers_at_height(headers, height).await
+ }
+
+ async fn store_headers_at_height(
+ &mut self,
+ headers: &[BlockHeader],
+ height: u32,
+ ) -> StorageResult<()> {
+ let mut height = height;
+
+ let hashes = headers.iter().map(|header| header.block_hash()).collect::<Vec<_>>();
+
+ self.block_headers.write().await.store_items_at_height(headers, height).await?;
+
+ // Update reverse index
+ let mut reverse_index = self.header_hash_index.write().await;
+
+ for hash in hashes {
+ reverse_index.insert(hash, height);
+ height += 1;
+ }
+
+ Ok(())
+ }
+
+ async fn load_headers(&self, range: Range<u32>) -> StorageResult<Vec<BlockHeader>> {
+ self.block_headers.write().await.get_items(range).await
+ }
+
+ async fn get_header(&self, height: u32) -> StorageResult<Option<BlockHeader>> {
+ if let Some(tip_height) = self.get_tip_height().await {
+ if height > tip_height {
+ return Ok(None);
+ }
+ } else {
+ return Ok(None);
+ }
+
+ if let Some(start_height) = self.get_start_height().await {
+ if height < start_height {
+ return Ok(None);
+ }
+ } else {
+ return Ok(None);
+ }
+
+ Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied())
+ }
+
+ async fn get_tip_height(&self) -> Option<u32> {
+ self.block_headers.read().await.tip_height()
+ }
+
+ async fn get_start_height(&self) -> Option<u32> {
+ self.block_headers.read().await.start_height()
+ }
+
+ async fn get_stored_headers_len(&self) -> u32 {
+ let headers_guard = self.block_headers.read().await;
+ let start_height = if let Some(start_height) = headers_guard.start_height() {
+ start_height
+ } else {
+ return 0;
+ };
+
+ let end_height = if let Some(end_height) = headers_guard.tip_height() {
+ end_height
+ } else {
+ return 0;
+ };
+
+ end_height - start_height + 1
+ }
+
+ /// Get header height by block hash (reverse lookup).
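+ ///
+ /// Served from the in-memory `header_hash_index` map, so the lookup is O(1)
+ /// and does not touch disk.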
+ async fn get_header_height_by_hash(
+ &self,
+ hash: &dashcore::BlockHash,
+ ) -> StorageResult<Option<u32>> {
+ Ok(self.header_hash_index.read().await.get(hash).copied())
+ }
+}
diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs
new file mode 100644
index 00000000..7b3c9680
--- /dev/null
+++ b/dash-spv/src/storage/chainstate.rs
@@ -0,0 +1,87 @@
+use async_trait::async_trait;
+
+use crate::{
+ error::StorageResult,
+ storage::{io::atomic_write, PersistentStorage},
+ ChainState,
+};
+
+#[async_trait]
+pub trait ChainStateStorage {
+ async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>;
+
+ async fn load_chain_state(&self) -> StorageResult<Option<ChainState>>;
+}
+
+pub struct PersistentChainStateStorage {}
+
+#[async_trait]
+impl PersistentStorage for PersistentChainStateStorage {
+ async fn load(&self) -> StorageResult<Self> {
+ Ok(PersistentChainStateStorage {})
+ }
+
+ async fn persist(&self) {
+ // Current implementation persists data every time data is stored
+ }
+}
+
+#[async_trait]
+impl ChainStateStorage for PersistentChainStateStorage {
+ async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> {
+ // Store chain state as JSON
+ let state_data = serde_json::json!({
+ "last_chainlock_height": state.last_chainlock_height,
+ "last_chainlock_hash": state.last_chainlock_hash,
+ "current_filter_tip": state.current_filter_tip,
+ "last_masternode_diff_height": state.last_masternode_diff_height,
+ "sync_base_height": state.sync_base_height,
+ });
+
+ let path = self.base_path.join("state/chain.json");
+ let json = state_data.to_string();
+ atomic_write(&path, json.as_bytes()).await?;
+
+ Ok(())
+ }
+
+ async fn load_chain_state(&self) -> StorageResult<Option<ChainState>> {
+ let path = self.base_path.join("state/chain.json");
+ if !path.exists() {
+ return Ok(None);
+ }
+
+ let content = tokio::fs::read_to_string(path).await?;
+ let value: serde_json::Value = serde_json::from_str(&content).map_err(|e| {
+ crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e))
+ })?;
+
+ let state = ChainState {
+ last_chainlock_height: value
+ .get("last_chainlock_height")
+ .and_then(|v| v.as_u64())
+ .map(|h| h as u32),
+ last_chainlock_hash: value
+ .get("last_chainlock_hash")
+ .and_then(|v| v.as_str())
+ .and_then(|s| s.parse().ok()),
+ current_filter_tip: value
+ .get("current_filter_tip")
+ .and_then(|v| v.as_str())
+ .and_then(|s| s.parse().ok()),
+ masternode_engine: None,
+ last_masternode_diff_height: value
+ .get("last_masternode_diff_height")
+ .and_then(|v| v.as_u64())
+ .map(|h| h as u32),
+ sync_base_height: value
+ .get("sync_base_height")
+ .and_then(|v| v.as_u64())
+ .map(|h| h as u32)
+ .unwrap_or(0),
+ ..Default::default()
+ };
+
+ Ok(Some(state))
+ }
+}
diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs
new file mode 100644
index 00000000..84e5658b
--- /dev/null
+++ b/dash-spv/src/storage/filters.rs
@@ -0,0 +1,102 @@
+use std::{ops::Range, sync::Arc};
+
+use async_trait::async_trait;
+use dashcore::hash_types::FilterHeader;
+use tokio::sync::RwLock;
+
+use crate::{
+ error::StorageResult,
+ storage::{segments::SegmentCache, PersistentStorage},
+};
+
+#[async_trait]
+pub trait FilterHeaderStorage {
+ /// Store filter headers.
+ async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>;
+
+ /// Load filter headers in the given blockchain height range.
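+ ///
+ /// The range is half-open (`start..end`), matching `load_headers` on
+ /// `BlockHeaderStorage`.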
+ async fn load_filter_headers(&self, range: Range) -> StorageResult>; + + /// Get a specific filter header by blockchain height. + async fn get_filter_header(&self, height: u32) -> StorageResult>; + + /// Get the current filter tip blockchain height. + async fn get_filter_tip_height(&self) -> StorageResult>; +} + +#[async_trait] +pub trait FilterStorage { + /// Store a compact filter at a blockchain height. + async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; + + /// Load compact filters in the given blockchain height range. + async fn load_filters(&self, range: Range) -> StorageResult>>; +} + +pub struct PersistentFilterHeaderStorage { + filter_headers: Arc>>, +} + +#[async_trait] +impl PersistentStorage for PersistentFilterHeaderStorage { + async fn load(&self) -> StorageResult { + todo!() + } + + async fn persist(&self) { + todo!() + } +} + +#[async_trait] +impl FilterHeaderStorage for PersistentFilterHeaderStorage { + /// Store filter headers. + async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { + self.filter_headers.write().await.store_items(headers).await + } + + /// Load filter headers in the given blockchain height range. + async fn load_filter_headers(&self, range: Range) -> StorageResult> { + self.filter_headers.write().await.get_items(range).await + } + + /// Get a specific filter header by blockchain height. + async fn get_filter_header(&self, height: u32) -> StorageResult> { + Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) + } + + /// Get the current filter tip blockchain height. + async fn get_filter_tip_height(&self) -> StorageResult> { + Ok(self.filter_headers.read().await.tip_height()) + } +} + +pub struct PersistentFilterStorage { + filters: Arc>>>, +} + +#[async_trait] +impl PersistentStorage for PersistentFilterStorage { + async fn load(&self) -> StorageResult { + todo!() + } + + async fn persist(&self) { + todo!() + } +} + +#[async_trait] +impl FilterStorage for PersistentFilterStorage { + /// Store a compact filter at a blockchain height. + async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { + self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await + } + + /// Load compact filters in the given blockchain height range. + async fn load_filters(&self, range: Range) -> StorageResult>> { + self.filters.write().await.get_items(range).await + } +} diff --git a/dash-spv/src/storage/headers.rs b/dash-spv/src/storage/headers.rs deleted file mode 100644 index 45ee0265..00000000 --- a/dash-spv/src/storage/headers.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! Header storage operations for DiskStorageManager. 
- -use std::collections::HashMap; -use std::path::Path; - -use dashcore::block::Header as BlockHeader; -use dashcore::BlockHash; - -use crate::error::StorageResult; -use crate::storage::io::atomic_write; -use crate::StorageError; - -use super::manager::DiskStorageManager; - -impl DiskStorageManager { - pub async fn store_headers_at_height( - &mut self, - headers: &[BlockHeader], - mut height: u32, - ) -> StorageResult<()> { - let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - - self.block_headers.write().await.store_items_at_height(headers, height).await?; - - // Update reverse index - let mut reverse_index = self.header_hash_index.write().await; - - for hash in hashes { - reverse_index.insert(hash, height); - height += 1; - } - - Ok(()) - } - - pub async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.read().await.next_height(); - self.store_headers_at_height(headers, height).await - } - - /// Get header height by hash. - pub async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { - Ok(self.header_hash_index.read().await.get(hash).copied()) - } -} - -/// Load index from file, if it fails it tries to build it from block -/// header segments and, if that also fails, it return an empty index. -/// -/// IO and deserialize errors are returned, the empty index is only built -/// if there is no persisted data to recreate it. -pub(super) async fn load_block_index( - manager: &DiskStorageManager, -) -> StorageResult> { - let index_path = manager.base_path.join("headers/index.dat"); - - if let Ok(content) = tokio::fs::read(&index_path).await { - bincode::deserialize(&content) - .map_err(|e| StorageError::ReadFailed(format!("Failed to deserialize index: {}", e))) - } else { - manager.block_headers.write().await.build_block_index_from_segments().await - } -} - -/// Save index to disk. 
-pub(super) async fn save_index_to_disk(
- path: &Path,
- index: &HashMap<BlockHash, u32>,
-) -> StorageResult<()> {
- let data = bincode::serialize(index)
- .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?;
-
- atomic_write(path, &data).await
-}
diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs
new file mode 100644
index 00000000..d0268465
--- /dev/null
+++ b/dash-spv/src/storage/masternode.rs
@@ -0,0 +1,59 @@
+use async_trait::async_trait;
+
+use crate::{
+ error::StorageResult,
+ storage::{io::atomic_write, MasternodeState, PersistentStorage},
+};
+
+#[async_trait]
+pub trait MasternodeStateStorage {
+ async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>;
+
+ async fn load_masternode_state(&self) -> StorageResult<Option<MasternodeState>>;
+}
+
+pub struct PersistentMasternodeStateStorage {}
+
+#[async_trait]
+impl PersistentStorage for PersistentMasternodeStateStorage {
+ async fn load(&self) -> StorageResult<Self> {
+ Ok(PersistentMasternodeStateStorage {})
+ }
+
+ async fn persist(&self) {
+ // Current implementation persists data every time data is stored
+ }
+}
+
+#[async_trait]
+impl MasternodeStateStorage for PersistentMasternodeStateStorage {
+ async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> {
+ let path = self.base_path.join("state/masternode.json");
+ let json = serde_json::to_string_pretty(state).map_err(|e| {
+ crate::error::StorageError::Serialization(format!(
+ "Failed to serialize masternode state: {}",
+ e
+ ))
+ })?;
+
+ atomic_write(&path, json.as_bytes()).await?;
+ Ok(())
+ }
+
+ async fn load_masternode_state(&self) -> StorageResult<Option<MasternodeState>> {
+ let path = self.base_path.join("state/masternode.json");
+ if !path.exists() {
+ return Ok(None);
+ }
+
+ let content = tokio::fs::read_to_string(path).await?;
+ let state = serde_json::from_str(&content).map_err(|e| {
+ crate::error::StorageError::Serialization(format!(
+ "Failed to deserialize masternode state: {}",
+ e
+ ))
+ })?;
+
+ Ok(Some(state))
+ }
+}
diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs
new file mode 100644
index 00000000..616e4c7f
--- /dev/null
+++ b/dash-spv/src/storage/metadata.rs
@@ -0,0 +1,45 @@
+use async_trait::async_trait;
+
+use crate::{
+ error::StorageResult,
+ storage::{io::atomic_write, PersistentStorage},
+};
+
+#[async_trait]
+pub trait MetadataStorage {
+ async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>;
+
+ async fn load_metadata(&self, key: &str) -> StorageResult<Option<Vec<u8>>>;
+}
+
+pub struct PersistentMetadataStorage {}
+
+#[async_trait]
+impl PersistentStorage for PersistentMetadataStorage {
+ async fn load(&self) -> StorageResult<Self> {
+ Ok(PersistentMetadataStorage {})
+ }
+
+ async fn persist(&self) {
+ // Current implementation persists data every time data is stored
+ }
+}
+
+#[async_trait]
+impl MetadataStorage for PersistentMetadataStorage {
+ async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> {
+ let path = self.base_path.join(format!("state/{}.dat", key));
+ atomic_write(&path, value).await?;
+ Ok(())
+ }
+
+ async fn load_metadata(&self, key: &str) -> StorageResult<Option<Vec<u8>>> {
+ let path = self.base_path.join(format!("state/{}.dat", key));
+ if !path.exists() {
+ return Ok(None);
+ }
+
+ let data = tokio::fs::read(path).await?;
+ Ok(Some(data))
+ }
+}
diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs
index d954c5c6..7bf4a7e3 100644
--- a/dash-spv/src/storage/mod.rs
+++
b/dash-spv/src/storage/mod.rs @@ -4,26 +4,43 @@ pub(crate) mod io; pub mod types; -mod headers; +mod blocks; +mod chainstate; +mod filters; mod lockfile; +mod masternode; +mod metadata; mod segments; -mod state; +mod transactions; use async_trait::async_trait; use std::collections::HashMap; -use std::ops::Range; - -use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, Txid}; +use std::path::PathBuf; +use std::sync::{Arc, RwLock}; +use std::time::Duration; use crate::error::StorageResult; -use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; +use crate::storage::blocks::PersistentBlockHeaderStorage; +use crate::storage::chainstate::PersistentChainStateStorage; +use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; +use crate::storage::lockfile::LockFile; +use crate::storage::metadata::PersistentMetadataStorage; +use crate::storage::segments::SegmentCache; +use crate::storage::transactions::PersistentTransactionStorage; +use crate::StorageError; pub use types::*; +#[async_trait] +pub trait PersistentStorage { + async fn load(&self) -> StorageResult; + async fn persist(&self); +} + #[async_trait] pub trait StorageManager: - BlockHeaderStorage - + FilterHeaderStorage + blocks::BlockHeaderStorage + + filters::FilterHeaderStorage + FilterStorage + TransactionStorage + MempoolStateStorage @@ -37,27 +54,18 @@ pub trait StorageManager: /// Disk-based storage manager with segmented files and async background saving. pub struct DiskStorageManager { - pub(super) base_path: PathBuf, - - // Segmented header storage - pub(super) block_headers: Arc>>, - pub(super) filter_headers: Arc>>, - pub(super) filters: Arc>>>, + base_path: PathBuf, - // Reverse index for O(1) lookups - pub(super) header_hash_index: Arc>>, + block_headers_storage: PersistentBlockHeaderStorage, + filter_headers_storage: PersistentFilterHeaderStorage, + filter_storage: PersistentFilterStorage, + transactions_storage: PersistentTransactionStorage, + metadata_storage: PersistentMetadataStorage, + chainstate_storage: PersistentChainStateStorage, // Background worker - pub(super) worker_tx: Option>, pub(super) worker_handle: Option>, - // Index save tracking to avoid redundant saves - pub(super) last_index_save_count: Arc>, - - // Mempool storage - pub(super) mempool_transactions: Arc>>, - pub(super) mempool_state: Arc>>, - // Lock file to prevent concurrent access from multiple processes. 
_lock_file: LockFile, } @@ -73,20 +81,6 @@ impl DiskStorageManager { // Acquire exclusive lock on the data directory let lock_file = LockFile::new(base_path.join(".lock"))?; - let headers_dir = base_path.join("headers"); - let filters_dir = base_path.join("filters"); - let state_dir = base_path.join("state"); - - fs::create_dir_all(&headers_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create headers directory: {}", e)) - })?; - fs::create_dir_all(&filters_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create filters directory: {}", e)) - })?; - fs::create_dir_all(&state_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create state directory: {}", e)) - })?; - let mut storage = Self { base_path: base_path.clone(), block_headers: Arc::new(RwLock::new( @@ -105,12 +99,8 @@ impl DiskStorageManager { _lock_file: lock_file, }; - // Load chain state to get sync_base_height - if let Ok(Some(state)) = storage.load_chain_state().await { - tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height); - } - - // Start background worker + // Start background worker that + // persists data when appropriate storage.start_worker().await; // Rebuild index @@ -247,111 +237,177 @@ impl DiskStorageManager { } } -#[async_trait] -pub trait BlockHeaderStorage { - /// Store block headers. - async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; - - /// Load block headers in the given range. - async fn load_headers(&self, range: Range) -> StorageResult>; - - /// Get a specific header by blockchain height. - async fn get_header(&self, height: u32) -> StorageResult>; - - /// Get the current tip blockchain height. - async fn get_tip_height(&self) -> Option; - - async fn get_start_height(&self) -> Option; - - async fn get_stored_headers_len(&self) -> u32; - - /// Get header height by block hash (reverse lookup). - async fn get_header_height_by_hash( - &self, - hash: &dashcore::BlockHash, - ) -> StorageResult>; -} - -#[async_trait] -pub trait FilterHeaderStorage { - /// Store filter headers. - async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; +#[cfg(test)] +mod tests { + use super::*; + use dashcore::{block::Version, pow::CompactTarget}; + use dashcore_hashes::Hash; + use tempfile::TempDir; + + fn build_headers(count: usize) -> Vec { + let mut headers = Vec::with_capacity(count); + let mut prev_hash = BlockHash::from_byte_array([0u8; 32]); + + for i in 0..count { + let header = BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: prev_hash, + merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array( + [(i % 255) as u8; 32], + ) + .into(), + time: 1 + i as u32, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: i as u32, + }; + prev_hash = header.block_hash(); + headers.push(header); + } - /// Load filter headers in the given blockchain height range. - async fn load_filter_headers(&self, range: Range) -> StorageResult>; + headers + } - /// Get a specific filter header by blockchain height. 
- async fn get_filter_header(&self, height: u32) -> StorageResult>; + #[tokio::test] + async fn test_load_headers() -> Result<(), Box> { + // Create a temporary directory for the test + let temp_dir = TempDir::new()?; + let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) + .await + .expect("Unable to create storage"); + + // Create a test header + let test_header = BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_byte_array([1; 32]), + merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array([2; 32]).into(), + time: 12345, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 67890, + }; - /// Get the current filter tip blockchain height. - async fn get_filter_tip_height(&self) -> StorageResult>; -} + // Store just one header + storage.store_headers(&[test_header]).await?; -#[async_trait] -pub trait FilterStorage { - /// Store a compact filter at a blockchain height. - async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; + let loaded_headers = storage.load_headers(0..1).await?; - /// Load compact filters in the given blockchain height range. - async fn load_filters(&self, range: Range) -> StorageResult>>; -} + // Should only get back the one header we stored + assert_eq!(loaded_headers.len(), 1); + assert_eq!(loaded_headers[0], test_header); -#[async_trait] -pub trait TransactionStorage { - /// Store an unconfirmed transaction. - async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()>; - - /// Remove a mempool transaction. - async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()>; - - /// Get a mempool transaction. - async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult>; - - /// Get all mempool transactions. - async fn get_all_mempool_transactions( - &self, - ) -> StorageResult>; -} + Ok(()) + } -#[async_trait] -pub trait MempoolStateStorage { - /// Store the complete mempool state. 
- async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; + #[tokio::test] + async fn test_checkpoint_storage_indexing() -> StorageResult<()> { + use dashcore::TxMerkleNode; + use tempfile::tempdir; + + let temp_dir = tempdir().expect("Failed to create temp dir"); + let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; + + // Create test headers starting from checkpoint height + let checkpoint_height = 1_100_000; + let headers: Vec = (0..100) + .map(|i| BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_byte_array([i as u8; 32]), + merkle_root: TxMerkleNode::from_byte_array([(i + 1) as u8; 32]), + time: 1234567890 + i, + bits: CompactTarget::from_consensus(0x1a2b3c4d), + nonce: 67890 + i, + }) + .collect(); + + let mut base_state = ChainState::new(); + base_state.sync_base_height = checkpoint_height; + storage.store_chain_state(&base_state).await?; + + storage.store_headers_at_height(&headers, checkpoint_height).await?; + assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); + + // Verify headers are stored at correct blockchain heights + let header_at_base = storage.get_header(checkpoint_height).await?; + assert_eq!( + header_at_base.expect("Header at base blockchain height should exist"), + headers[0] + ); + + let header_at_ending = storage.get_header(checkpoint_height + 99).await?; + assert_eq!( + header_at_ending.expect("Header at ending blockchain height should exist"), + headers[99] + ); + + // Test the reverse index (hash -> blockchain height) + let hash_0 = headers[0].block_hash(); + let height_0 = storage.get_header_height_by_hash(&hash_0).await?; + assert_eq!( + height_0, + Some(checkpoint_height), + "Hash should map to blockchain height 1,100,000" + ); + + let hash_99 = headers[99].block_hash(); + let height_99 = storage.get_header_height_by_hash(&hash_99).await?; + assert_eq!( + height_99, + Some(checkpoint_height + 99), + "Hash should map to blockchain height 1,100,099" + ); + + // Store chain state to persist sync_base_height + let mut chain_state = ChainState::new(); + chain_state.sync_base_height = checkpoint_height; + storage.store_chain_state(&chain_state).await?; + + // Force save to disk + storage.save_dirty().await; + + drop(storage); + + // Create a new storage instance to test index rebuilding + let storage2 = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; + + // Verify the index was rebuilt correctly + let height_after_rebuild = storage2.get_header_height_by_hash(&hash_0).await?; + assert_eq!( + height_after_rebuild, + Some(checkpoint_height), + "After index rebuild, hash should still map to blockchain height 1,100,000" + ); + + // Verify header can still be retrieved by blockchain height after reload + let header_after_reload = storage2.get_header(checkpoint_height).await?; + assert!( + header_after_reload.is_some(), + "Header at base blockchain height should exist after reload" + ); + assert_eq!(header_after_reload.unwrap(), headers[0]); - /// Load the mempool state. - async fn load_mempool_state(&self) -> StorageResult>; -} + Ok(()) + } -#[async_trait] -pub trait MetadataStorage { - /// Store metadata. 
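The checkpoint test above pins down the height arithmetic: headers are stored relative to `sync_base_height`, so stored slot `i` corresponds to blockchain height `sync_base_height + i`. A small self-contained sketch of that mapping (the helper names are hypothetical):

// Checkpoint-relative mapping exercised by the test above: with
// sync_base_height = 1_100_000, stored slot 99 is blockchain
// height 1_100_000 + 99 = 1_100_099.
fn to_blockchain_height(sync_base_height: u32, slot: u32) -> u32 {
    sync_base_height + slot
}

fn to_storage_slot(sync_base_height: u32, blockchain_height: u32) -> Option<u32> {
    // Heights below the checkpoint are simply not stored.
    blockchain_height.checked_sub(sync_base_height)
}

fn main() {
    assert_eq!(to_blockchain_height(1_100_000, 99), 1_100_099);
    assert_eq!(to_storage_slot(1_100_000, 1_100_099), Some(99));
    assert_eq!(to_storage_slot(1_100_000, 42), None);
}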
- async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; + #[tokio::test] + async fn test_shutdown_flushes_index() -> Result<(), Box> { + let temp_dir = TempDir::new()?; + let base_path = temp_dir.path().to_path_buf(); + let headers = build_headers(11_000); + let last_hash = headers.last().unwrap().block_hash(); - /// Load metadata. - async fn load_metadata(&self, key: &str) -> StorageResult>>; -} + { + let mut storage = DiskStorageManager::new(base_path.clone()).await?; -#[async_trait] -pub trait ChainStateStorage { - /// Store chain state. - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; + storage.store_headers(&headers[..10_000]).await?; + storage.save_dirty().await; - /// Load chain state. - async fn load_chain_state(&self) -> StorageResult>; -} + storage.store_headers(&headers[10_000..]).await?; + storage.shutdown().await; + } -#[async_trait] -pub trait MasternodeStateStorage { - /// Store masternode state. - async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; + let storage = DiskStorageManager::new(base_path).await?; + let height = storage.get_header_height_by_hash(&last_hash).await?; + assert_eq!(height, Some(10_999)); - /// Load masternode state. - async fn load_masternode_state(&self) -> StorageResult>; + Ok(()) + } } diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs new file mode 100644 index 00000000..2cd4d45b --- /dev/null +++ b/dash-spv/src/storage/transactions.rs @@ -0,0 +1,104 @@ +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use async_trait::async_trait; +use dashcore::Txid; + +use crate::{ + error::StorageResult, + storage::PersistentStorage, + types::{MempoolState, UnconfirmedTransaction}, +}; + +#[async_trait] +pub trait TransactionStorage { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()>; + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()>; + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult>; + + async fn get_all_mempool_transactions( + &self, + ) -> StorageResult>; +} + +#[async_trait] +pub trait MempoolStateStorage { + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; + + async fn load_mempool_state(&self) -> StorageResult>; +} + +pub struct PersistentTransactionStorage { + mempool_transactions: Arc>>, + mempool_state: Arc>>, +} + +#[async_trait] +impl PersistentStorage for PersistentTransactionStorage { + async fn load(&self) -> StorageResult { + let mempool_transactions = Arc::new(RwLock::new(HashMap::new())); + let mempool_state = Arc::new(RwLock::new(None)); + + Ok(PersistentTransactionStorage { + mempool_transactions, + mempool_state, + }) + } + + async fn persist(&self) { + // This data is not currently being persisted + } +} + +#[async_trait] +impl TransactionStorage for PersistentTransactionStorage { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()> { + self.mempool_transactions.write().await.insert(*txid, tx.clone()); + Ok(()) + } + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { + self.mempool_transactions.write().await.remove(txid); + Ok(()) + } + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult> { + Ok(self.mempool_transactions.read().await.get(txid).cloned()) + } + + async fn 
get_all_mempool_transactions( + &self, + ) -> StorageResult> { + Ok(self.mempool_transactions.read().await.clone()) + } +} + +#[async_trait] +impl MempoolStateStorage for PersistentTransactionStorage { + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { + *self.mempool_state.write().await = Some(state.clone()); + Ok(()) + } + + async fn load_mempool_state(&self) -> StorageResult> { + Ok(self.mempool_state.read().await.clone()) + } +} From 739e809b2e9a0a6bcddcb42be813502e329804fa Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 18:32:28 +0000 Subject: [PATCH 11/21] general structure made --- dash-spv/src/storage/blocks.rs | 70 ++++++++------- dash-spv/src/storage/chainstate.rs | 32 +++++-- dash-spv/src/storage/filters.rs | 66 ++++++++++---- dash-spv/src/storage/masternode.rs | 29 +++++-- dash-spv/src/storage/metadata.rs | 29 +++++-- dash-spv/src/storage/mod.rs | 124 +++++++++++++-------------- dash-spv/src/storage/transactions.rs | 30 +++---- 7 files changed, 228 insertions(+), 152 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 826cbe94..e63c3f9b 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -2,8 +2,7 @@ use std::collections::HashMap; use std::ops::Range; -use std::path::Path; -use std::sync::{Arc, RwLock}; +use std::path::PathBuf; use async_trait::async_trait; use dashcore::block::Header as BlockHeader; @@ -48,40 +47,49 @@ pub trait BlockHeaderStorage { } pub struct PersistentBlockHeaderStorage { - block_headers: Arc>>, - header_hash_index: Arc>>, + block_headers: SegmentCache, + header_hash_index: HashMap, +} + +impl PersistentBlockHeaderStorage { + const FOLDER_NAME: &str = "block_headers"; + const INDEX_FILE_NAME: &str = "index.dat"; } #[async_trait] impl PersistentStorage for PersistentBlockHeaderStorage { - async fn load(&self) -> StorageResult { - let index_path = self.base_path.join("headers/index.dat"); + async fn load(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); - let block_headers = SegmentCache::load_or_new(base_path).await; + let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME); - let header_hash_index = if let Ok(index) = - tokio::fs::read(&index_path).await.and_then(|content| bincode::deserialize(&content)) + let block_headers = SegmentCache::load_or_new(storage_path).await?; + + let header_hash_index = match tokio::fs::read(&index_path) + .await + .ok() + .map(|content| bincode::deserialize(&content).ok()) + .flatten() { - index - } else { - block_headers.build_block_index_from_segments().await + Some(index) => index, + _ => block_headers.build_block_index_from_segments().await?, }; - let block_headers = Arc::new(RwLock::new(block_headers)); - let header_hash_index = Arc::new(RwLock::new(header_hash_index)); - Ok(Self { block_headers, header_hash_index, }) } - async fn persist(&self) { - let index_path = self.base_path.join("headers/index.dat"); + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()> { + let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + let index_path = block_headers_folder.join(Self::INDEX_FILE_NAME); + + tokio::fs::create_dir_all(block_headers_folder).await?; - self.block_headers.write().await.persist().await; + self.block_headers.persist().await; - let data = bincode::serialize(&self.header_hash_index.read().await) + let data = bincode::serialize(&self.header_hash_index) .map_err(|e| 
StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; atomic_write(&index_path, &data).await @@ -91,7 +99,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { #[async_trait] impl BlockHeaderStorage for PersistentBlockHeaderStorage { async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.read().await.next_height(); + let height = self.block_headers.next_height(); self.store_headers_at_height(headers, height).await } @@ -104,13 +112,10 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - self.block_headers.write().await.store_items_at_height(headers, height).await?; - - // Update reverse index - let mut reverse_index = self.header_hash_index.write().await; + self.block_headers.store_items_at_height(headers, height).await?; for hash in hashes { - reverse_index.insert(hash, height); + self.header_hash_index.insert(hash, height); height += 1; } @@ -118,7 +123,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn load_headers(&self, range: Range) -> StorageResult> { - self.block_headers.write().await.get_items(range).await + self.block_headers.get_items(range).await } async fn get_header(&self, height: u32) -> StorageResult> { @@ -138,26 +143,25 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { return Ok(None); } - Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) + Ok(self.load_headers(height..height + 1).await?.first().copied()) } async fn get_tip_height(&self) -> Option { - self.block_headers.read().await.tip_height() + self.block_headers.tip_height() } async fn get_start_height(&self) -> Option { - self.block_headers.read().await.start_height() + self.block_headers.start_height() } async fn get_stored_headers_len(&self) -> u32 { - let headers_guard = self.block_headers.read().await; - let start_height = if let Some(start_height) = headers_guard.start_height() { + let start_height = if let Some(start_height) = self.block_headers.start_height() { start_height } else { return 0; }; - let end_height = if let Some(end_height) = headers_guard.tip_height() { + let end_height = if let Some(end_height) = self.block_headers.tip_height() { end_height } else { return 0; @@ -171,6 +175,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { &self, hash: &dashcore::BlockHash, ) -> StorageResult> { - Ok(self.header_hash_index.read().await.get(hash).copied()) + Ok(self.header_hash_index.get(hash).copied()) } } diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs index 7b3c9680..23b1aaec 100644 --- a/dash-spv/src/storage/chainstate.rs +++ b/dash-spv/src/storage/chainstate.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use async_trait::async_trait; use crate::{ @@ -13,23 +15,32 @@ pub trait ChainStateStorage { async fn load_chain_state(&self) -> StorageResult>; } -pub struct PersistentChainStateStorage {} +pub struct PersistentChainStateStorage { + storage_path: PathBuf, +} + +impl PersistentChainStateStorage { + const FOLDER_NAME: &str = "chainstate"; + const FILE_NAME: &str = "chainstate.json"; +} #[async_trait] impl PersistentStorage for PersistentChainStateStorage { - async fn load(&self) -> StorageResult { - Ok(PersistentChainStateStorage {}) + async fn load(storage_path: impl Into + Send) -> StorageResult { + Ok(PersistentChainStateStorage { + storage_path: storage_path.into(), + }) } - async fn persist(&self) { + 
async fn persist(&mut self, _storage_path: impl Into<PathBuf> + Send) -> StorageResult<()> {
         // Current implementation persists data every time it is stored
+        Ok(())
     }
 }
 
 #[async_trait]
 impl ChainStateStorage for PersistentChainStateStorage {
     async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> {
-        // Store other state as JSON
         let state_data = serde_json::json!({
             "last_chainlock_height": state.last_chainlock_height,
             "last_chainlock_hash": state.last_chainlock_hash,
@@ -38,7 +49,11 @@
             "sync_base_height": state.sync_base_height,
         });
 
-        let path = self.base_path.join("state/chain.json");
+        let chainstate_folder = self.storage_path.join(Self::FOLDER_NAME);
+        let path = chainstate_folder.join(Self::FILE_NAME);
+
+        tokio::fs::create_dir_all(chainstate_folder).await?;
+
         let json = state_data.to_string();
         atomic_write(&path, json.as_bytes()).await?;
 
@@ -46,7 +61,7 @@
     }
 
     async fn load_chain_state(&self) -> StorageResult<Option<ChainState>> {
-        let path = self.base_path.join("state/chain.json");
+        let path = self.storage_path.join(Self::FOLDER_NAME).join(Self::FILE_NAME);
         if !path.exists() {
             return Ok(None);
         }
@@ -56,7 +71,7 @@
             crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e))
         })?;
 
-        let mut state = ChainState {
+        let state = ChainState {
             last_chainlock_height: value
                 .get("last_chainlock_height")
                 .and_then(|v| v.as_u64())
@@ -79,7 +94,6 @@
                 .and_then(|v| v.as_u64())
                 .map(|h| h as u32)
                 .unwrap_or(0),
-            ..Default::default()
         };
 
         Ok(Some(state))
diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs
index 84e5658b..6fcbd5fe 100644
--- a/dash-spv/src/storage/filters.rs
+++ b/dash-spv/src/storage/filters.rs
@@ -1,7 +1,4 @@
-use std::{
-    ops::Range,
-    sync::{Arc, RwLock},
-};
+use std::{ops::Range, path::PathBuf};
 
 use async_trait::async_trait;
 use dashcore::hash_types::FilterHeader;
@@ -36,17 +33,32 @@ pub trait FilterStorage {
 }
 
 pub struct PersistentFilterHeaderStorage {
-    filter_headers: Arc<RwLock<SegmentCache<FilterHeader>>>,
+    filter_headers: SegmentCache<FilterHeader>,
+}
+
+impl PersistentFilterHeaderStorage {
+    const FOLDER_NAME: &str = "filter_headers";
 }
 
 #[async_trait]
 impl PersistentStorage for PersistentFilterHeaderStorage {
-    async fn load(&self) -> StorageResult<Self> {
-        todo!()
+    async fn load(storage_path: impl Into<PathBuf> + Send) -> StorageResult<Self> {
+        let storage_path = storage_path.into();
+        let segments_folder = storage_path.join(Self::FOLDER_NAME);
+
+        let filter_headers = SegmentCache::load_or_new(segments_folder).await?;
+
+        Ok(Self {
+            filter_headers,
+        })
     }
 
-    async fn persist(&self) {
-        todo!()
+    async fn persist(&mut self, base_path: impl Into<PathBuf> + Send) -> StorageResult<()> {
+        let filter_headers_folder = base_path.into().join(Self::FOLDER_NAME);
+
+        tokio::fs::create_dir_all(filter_headers_folder).await?;
+
+        self.filter_headers.persist(filter_headers_folder).await
     }
 }
 
@@ -54,17 +66,17 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage {
     /// Store filter headers.
     async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> {
-        self.filter_headers.write().await.store_items(headers).await
+        self.filter_headers.store_items(headers).await
     }
 
     /// Load filter headers in the given blockchain height range.
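The JSON-backed state stores in this series funnel every write through `io::atomic_write`. Its body is not shown in these patches, so the sketch below is only an assumption about the usual shape of such a helper (temp file plus rename), not the crate's actual implementation:

use std::path::Path;

// Assumed shape of an atomic write helper: write to a sibling temp file,
// then rename over the target. On POSIX filesystems the rename replaces
// the destination atomically, so readers never see a half-written file.
async fn atomic_write_sketch(path: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = path.with_extension("tmp");
    tokio::fs::write(&tmp, data).await?;
    tokio::fs::rename(&tmp, path).await
}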
async fn load_filter_headers(&self, range: Range) -> StorageResult> { - self.filter_headers.write().await.get_items(range).await + self.filter_headers.get_items(range).await } /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) + Ok(self.filter_headers.get_items(height..height + 1).await?.first().copied()) } /// Get the current filter tip blockchain height. @@ -74,17 +86,33 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { } pub struct PersistentFilterStorage { - filters: Arc>>>, + filters: SegmentCache>, +} + +impl PersistentFilterStorage { + const FOLDER_NAME: &str = "filters"; } #[async_trait] impl PersistentStorage for PersistentFilterStorage { - async fn load(&self) -> StorageResult { - todo!() + async fn load(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); + let filters_folder = storage_path.join(Self::FOLDER_NAME); + + let filters = SegmentCache::load_or_new(filters_folder).await?; + + Ok(Self { + filters, + }) } - async fn persist(&self) { - todo!() + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()> { + let storage_path = storage_path.into(); + let filters_folder = storage_path.join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(filters_folder).await?; + + self.filters.persist(filters_folder).await } } @@ -92,11 +120,11 @@ impl PersistentStorage for PersistentFilterStorage { impl FilterStorage for PersistentFilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await + self.filters.store_items_at_height(&[filter.to_vec()], height).await } /// Load compact filters in the given blockchain height range. 
    async fn load_filters(&self, range: Range<u32>) -> StorageResult<Vec<Vec<u8>>> {
-        self.filters.write().await.get_items(range).await
+        self.filters.get_items(range).await
     }
 }
diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs
index d0268465..254b26d1 100644
--- a/dash-spv/src/storage/masternode.rs
+++ b/dash-spv/src/storage/masternode.rs
@@ -1,3 +1,5 @@
+use std::path::PathBuf;
+
 use async_trait::async_trait;
 
 use crate::{
@@ -12,23 +14,37 @@ pub trait MasternodeStateStorage {
     async fn load_masternode_state(&self) -> StorageResult<Option<MasternodeState>>;
 }
 
-pub struct PersistentMasternodeStateStorage {}
+pub struct PersistentMasternodeStateStorage {
+    storage_path: PathBuf,
+}
+
+impl PersistentMasternodeStateStorage {
+    const FOLDER_NAME: &str = "masternodestate";
+    const MASTERNODE_FILE_NAME: &str = "masternodestate.json";
+}
 
 #[async_trait]
 impl PersistentStorage for PersistentMasternodeStateStorage {
-    async fn load(&self) -> StorageResult<Self> {
-        Ok(PersistentMasternodeStateStorage {})
+    async fn load(storage_path: impl Into<PathBuf> + Send) -> StorageResult<Self> {
+        Ok(PersistentMasternodeStateStorage {
+            storage_path: storage_path.into(),
+        })
     }
 
-    async fn persist(&self) {
+    async fn persist(&mut self, _storage_path: impl Into<PathBuf> + Send) -> StorageResult<()> {
         // Current implementation persists data every time it is stored
+        Ok(())
     }
 }
 
 #[async_trait]
 impl MasternodeStateStorage for PersistentMasternodeStateStorage {
     async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> {
-        let path = self.base_path.join("state/masternode.json");
+        let masternodestate_folder = self.storage_path.join(Self::FOLDER_NAME);
+        let path = masternodestate_folder.join(Self::MASTERNODE_FILE_NAME);
+
+        tokio::fs::create_dir_all(masternodestate_folder).await?;
+
         let json = serde_json::to_string_pretty(state).map_err(|e| {
             crate::error::StorageError::Serialization(format!(
                 "Failed to serialize masternode state: {}",
@@ -41,7 +57,8 @@
     }
 
     async fn load_masternode_state(&self) -> StorageResult<Option<MasternodeState>> {
-        let path = self.base_path.join("state/masternode.json");
+        let path = self.storage_path.join(Self::FOLDER_NAME).join(Self::MASTERNODE_FILE_NAME);
+
         if !path.exists() {
             return Ok(None);
         }
diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs
index 616e4c7f..5ec51712 100644
--- a/dash-spv/src/storage/metadata.rs
+++ b/dash-spv/src/storage/metadata.rs
@@ -1,3 +1,5 @@
+use std::path::PathBuf;
+
 use async_trait::async_trait;
 
 use crate::{
@@ -12,29 +14,44 @@ pub trait MetadataStorage {
     async fn load_metadata(&self, key: &str) -> StorageResult<Option<Vec<u8>>>;
 }
 
-pub struct PersistentMetadataStorage {}
+pub struct PersistentMetadataStorage {
+    storage_path: PathBuf,
+}
+
+impl PersistentMetadataStorage {
+    const FOLDER_NAME: &str = "metadata";
+}
 
 #[async_trait]
 impl PersistentStorage for PersistentMetadataStorage {
-    async fn load(&self) -> StorageResult<Self> {
-        Ok(PersistentMetadataStorage {})
+    async fn load(storage_path: impl Into<PathBuf> + Send) -> StorageResult<Self> {
+        Ok(PersistentMetadataStorage {
+            storage_path: storage_path.into(),
+        })
    }
 
-    async fn persist(&self) {
+    async fn persist(&mut self, _storage_path: impl Into<PathBuf> + Send) -> StorageResult<()> {
         // Current implementation persists data every time it is stored
+        Ok(())
     }
 }
 
 #[async_trait]
 impl MetadataStorage for PersistentMetadataStorage {
     async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> {
-        let path = self.base_path.join(format!("state/{}.dat", key));
+        let
metadata_folder = self.storage_path.join(Self::FOLDER_NAME); + let path = metadata_folder.join(format!("{key}.dat")); + + tokio::fs::create_dir_all(metadata_folder).await?; + atomic_write(&path, value).await?; + Ok(()) } async fn load_metadata(&self, key: &str) -> StorageResult>> { - let path = self.base_path.join(format!("state/{}.dat", key)); + let path = self.storage_path.join(Self::FOLDER_NAME).join(format!("{key}.dat")); + if !path.exists() { return Ok(None); } diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 7bf4a7e3..1ba66e75 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -14,10 +14,10 @@ mod segments; mod transactions; use async_trait::async_trait; -use std::collections::HashMap; use std::path::PathBuf; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; +use tokio::sync::RwLock; use crate::error::StorageResult; use crate::storage::blocks::PersistentBlockHeaderStorage; @@ -25,28 +25,33 @@ use crate::storage::chainstate::PersistentChainStateStorage; use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; use crate::storage::lockfile::LockFile; use crate::storage::metadata::PersistentMetadataStorage; -use crate::storage::segments::SegmentCache; use crate::storage::transactions::PersistentTransactionStorage; use crate::StorageError; pub use types::*; #[async_trait] -pub trait PersistentStorage { - async fn load(&self) -> StorageResult; - async fn persist(&self); +pub trait PersistentStorage: Sized { + async fn load(storage_path: impl Into + Send) -> StorageResult; + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + self.persist(storage_path).await + } } #[async_trait] pub trait StorageManager: blocks::BlockHeaderStorage + filters::FilterHeaderStorage - + FilterStorage - + TransactionStorage - + MempoolStateStorage - + MetadataStorage - + ChainStateStorage - + MasternodeStateStorage + + filters::FilterStorage + + transactions::TransactionStorage + + metadata::MetadataStorage + + chainstate::ChainStateStorage + + masternode::MasternodeStateStorage + Send + Sync { @@ -56,24 +61,26 @@ pub trait StorageManager: pub struct DiskStorageManager { base_path: PathBuf, - block_headers_storage: PersistentBlockHeaderStorage, - filter_headers_storage: PersistentFilterHeaderStorage, - filter_storage: PersistentFilterStorage, - transactions_storage: PersistentTransactionStorage, - metadata_storage: PersistentMetadataStorage, - chainstate_storage: PersistentChainStateStorage, + block_headers: Arc>, + filter_headers: Arc>, + filters: Arc>, + transactions: Arc>, + metadata: Arc>, + chainstate: Arc>, // Background worker - pub(super) worker_handle: Option>, + worker_handle: Option>, // Lock file to prevent concurrent access from multiple processes. 
_lock_file: LockFile, } impl DiskStorageManager { - pub async fn new(base_path: PathBuf) -> StorageResult { + pub async fn new(base_path: impl Into + Send) -> StorageResult { use std::fs; + let base_path = base_path.into(); + // Create directories if they don't exist fs::create_dir_all(&base_path) .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; @@ -83,19 +90,22 @@ impl DiskStorageManager { let mut storage = Self { base_path: base_path.clone(), + block_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, + PersistentBlockHeaderStorage::load(&base_path).await?, )), filter_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, + PersistentFilterHeaderStorage::load(&base_path).await?, )), - filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)), - header_hash_index: Arc::new(RwLock::new(HashMap::new())), - worker_tx: None, + filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&base_path).await?)), + transactions: Arc::new(RwLock::new( + PersistentTransactionStorage::load(&base_path).await?, + )), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&base_path).await?)), + chainstate: Arc::new(RwLock::new(PersistentChainStateStorage::load(&base_path).await?)), + worker_handle: None, - last_index_save_count: Arc::new(RwLock::new(0)), - mempool_transactions: Arc::new(RwLock::new(HashMap::new())), - mempool_state: Arc::new(RwLock::new(None)), + _lock_file: lock_file, }; @@ -103,19 +113,6 @@ impl DiskStorageManager { // persists data when appropriate storage.start_worker().await; - // Rebuild index - let block_index = match load_block_index(&storage).await { - Ok(index) => index, - Err(e) => { - tracing::error!( - "An unexpected IO or deserialization error didn't allow the block index to be built: {}", - e - ); - HashMap::new() - } - }; - storage.header_hash_index = Arc::new(RwLock::new(block_index)); - Ok(storage) } @@ -124,7 +121,7 @@ impl DiskStorageManager { use tempfile::TempDir; let temp_dir = TempDir::new()?; - Self::new(temp_dir.path().into()).await + Self::new(temp_dir.path()).await } /// Start the background worker @@ -132,6 +129,11 @@ impl DiskStorageManager { let block_headers = Arc::clone(&self.block_headers); let filter_headers = Arc::clone(&self.filter_headers); let filters = Arc::clone(&self.filters); + let transactions = Arc::clone(&self.transactions); + let metadata = Arc::clone(&self.metadata); + let chainstate = Arc::clone(&self.chainstate); + + let storage_path = self.base_path.clone(); let worker_handle = tokio::spawn(async move { let mut ticker = tokio::time::interval(Duration::from_secs(5)); @@ -139,9 +141,12 @@ impl DiskStorageManager { loop { ticker.tick().await; - block_headers.write().await.persist_evicted().await; - filter_headers.write().await.persist_evicted().await; - filters.write().await.persist_evicted().await; + let _ = block_headers.write().await.persist_dirty(&storage_path).await; + let _ = filter_headers.write().await.persist_dirty(&storage_path).await; + let _ = filters.write().await.persist_dirty(&storage_path).await; + let _ = transactions.write().await.persist_dirty(&storage_path).await; + let _ = metadata.write().await.persist_dirty(&storage_path).await; + let _ = chainstate.write().await.persist_dirty(&storage_path).await; } }); @@ -204,11 +209,6 @@ impl DiskStorageManager { tokio::fs::create_dir_all(&self.base_path).await?; } - // Recreate expected subdirectories - 
tokio::fs::create_dir_all(self.base_path.join("headers")).await?; - tokio::fs::create_dir_all(self.base_path.join("filters")).await?; - tokio::fs::create_dir_all(self.base_path.join("state")).await?; - // Restart the background worker for future operations self.start_worker().await; @@ -220,20 +220,18 @@ impl DiskStorageManager { self.stop_worker(); // Persist all dirty data - self.save_dirty().await; + self.persist().await; } - /// Save all dirty data. - pub(super) async fn save_dirty(&self) { - self.filter_headers.write().await.persist().await; - self.block_headers.write().await.persist().await; - self.filters.write().await.persist().await; + async fn persist(&self) { + let storage_path = &self.base_path; - let path = self.base_path.join("headers/index.dat"); - let index = self.header_hash_index.read().await; - if let Err(e) = save_index_to_disk(&path, &index).await { - tracing::error!("Failed to persist header index: {}", e); - } + let _ = self.block_headers.write().await.persist(storage_path).await; + let _ = self.filter_headers.write().await.persist(storage_path).await; + let _ = self.filters.write().await.persist(storage_path).await; + let _ = self.transactions.write().await.persist(storage_path).await; + let _ = self.metadata.write().await.persist(storage_path).await; + let _ = self.chainstate.write().await.persist(storage_path).await; } } @@ -361,7 +359,7 @@ mod tests { storage.store_chain_state(&chain_state).await?; // Force save to disk - storage.save_dirty().await; + storage.persist().await; drop(storage); @@ -398,7 +396,7 @@ mod tests { let mut storage = DiskStorageManager::new(base_path.clone()).await?; storage.store_headers(&headers[..10_000]).await?; - storage.save_dirty().await; + storage.persist().await; storage.store_headers(&headers[10_000..]).await?; storage.shutdown().await; diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs index 2cd4d45b..d16e5b55 100644 --- a/dash-spv/src/storage/transactions.rs +++ b/dash-spv/src/storage/transactions.rs @@ -1,7 +1,4 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; +use std::{collections::HashMap, path::PathBuf}; use async_trait::async_trait; use dashcore::Txid; @@ -40,15 +37,15 @@ pub trait MempoolStateStorage { } pub struct PersistentTransactionStorage { - mempool_transactions: Arc>>, - mempool_state: Arc>>, + mempool_transactions: HashMap, + mempool_state: Option, } #[async_trait] impl PersistentStorage for PersistentTransactionStorage { - async fn load(&self) -> StorageResult { - let mempool_transactions = Arc::new(RwLock::new(HashMap::new())); - let mempool_state = Arc::new(RwLock::new(None)); + async fn load(_storage_path: impl Into + Send) -> StorageResult { + let mempool_transactions = HashMap::new(); + let mempool_state = None; Ok(PersistentTransactionStorage { mempool_transactions, @@ -56,8 +53,9 @@ impl PersistentStorage for PersistentTransactionStorage { }) } - async fn persist(&self) { + async fn persist(&mut self, _storage_path: impl Into + Send) -> StorageResult<()> { // This data is not currently being persisted + Ok(()) } } @@ -68,12 +66,12 @@ impl TransactionStorage for PersistentTransactionStorage { txid: &Txid, tx: &UnconfirmedTransaction, ) -> StorageResult<()> { - self.mempool_transactions.write().await.insert(*txid, tx.clone()); + self.mempool_transactions.insert(*txid, tx.clone()); Ok(()) } async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - self.mempool_transactions.write().await.remove(txid); + 
self.mempool_transactions.remove(txid); Ok(()) } @@ -81,24 +79,24 @@ impl TransactionStorage for PersistentTransactionStorage { &self, txid: &Txid, ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.get(txid).cloned()) + Ok(self.mempool_transactions.get(txid).cloned()) } async fn get_all_mempool_transactions( &self, ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.clone()) + Ok(self.mempool_transactions.clone()) } } #[async_trait] impl MempoolStateStorage for PersistentTransactionStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - *self.mempool_state.write().await = Some(state.clone()); + self.mempool_state = Some(state.clone()); Ok(()) } async fn load_mempool_state(&self) -> StorageResult> { - Ok(self.mempool_state.read().await.clone()) + Ok(self.mempool_state.clone()) } } From bd33f0a2a125514e491b48b3284493ab51582b7c Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 18:53:22 +0000 Subject: [PATCH 12/21] persist segments caches now requires the directory where the user wants to write the data --- dash-spv/src/storage/blocks.rs | 4 +- dash-spv/src/storage/filters.rs | 10 +++-- dash-spv/src/storage/segments.rs | 69 +++++++++++++------------------- 3 files changed, 35 insertions(+), 48 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index e63c3f9b..7f1e22e6 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -85,9 +85,9 @@ impl PersistentStorage for PersistentBlockHeaderStorage { let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); let index_path = block_headers_folder.join(Self::INDEX_FILE_NAME); - tokio::fs::create_dir_all(block_headers_folder).await?; + tokio::fs::create_dir_all(&block_headers_folder).await?; - self.block_headers.persist().await; + self.block_headers.persist(&block_headers_folder).await; let data = bincode::serialize(&self.header_hash_index) .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 6fcbd5fe..fa22c4a5 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -56,9 +56,10 @@ impl PersistentStorage for PersistentFilterHeaderStorage { async fn persist(&mut self, base_path: impl Into + Send) -> StorageResult<()> { let filter_headers_folder = base_path.into().join(Self::FOLDER_NAME); - tokio::fs::create_dir_all(filter_headers_folder).await?; + tokio::fs::create_dir_all(&filter_headers_folder).await?; - self.filter_headers.persist(filter_headers_folder).await + self.filter_headers.persist(&filter_headers_folder).await; + Ok(()) } } @@ -110,9 +111,10 @@ impl PersistentStorage for PersistentFilterStorage { let storage_path = storage_path.into(); let filters_folder = storage_path.join(Self::FOLDER_NAME); - tokio::fs::create_dir_all(filters_folder).await?; + tokio::fs::create_dir_all(&filters_folder).await?; - self.filters.persist(filters_folder).await + self.filters.persist(&filters_folder).await; + Ok(()) } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 25118c4e..ada4e7bf 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -20,35 +20,23 @@ use dashcore_hashes::Hash; use crate::{error::StorageResult, storage::io::atomic_write, StorageError}; pub trait Persistable: Sized + Encodable + Decodable + PartialEq + Clone { - const FOLDER_NAME: &'static str; const 
SEGMENT_PREFIX: &'static str = "segment"; const DATA_FILE_EXTENSION: &'static str = "dat"; - fn relative_disk_path(segment_id: u32) -> PathBuf { - format!( - "{}/{}_{:04}.{}", - Self::FOLDER_NAME, - Self::SEGMENT_PREFIX, - segment_id, - Self::DATA_FILE_EXTENSION - ) - .into() + fn segment_file_name(segment_id: u32) -> String { + format!("{}_{:04}.{}", Self::SEGMENT_PREFIX, segment_id, Self::DATA_FILE_EXTENSION) } fn sentinel() -> Self; } impl Persistable for Vec { - const FOLDER_NAME: &'static str = "filters"; - fn sentinel() -> Self { vec![] } } impl Persistable for BlockHeader { - const FOLDER_NAME: &'static str = "block_headers"; - fn sentinel() -> Self { Self { version: Version::from_consensus(i32::MAX), // Invalid version @@ -62,8 +50,6 @@ impl Persistable for BlockHeader { } impl Persistable for FilterHeader { - const FOLDER_NAME: &'static str = "filter_headers"; - fn sentinel() -> Self { FilterHeader::from_byte_array([0u8; 32]) } @@ -76,19 +62,15 @@ pub struct SegmentCache { evicted: HashMap>, tip_height: Option, start_height: Option, - base_path: PathBuf, + segments_dir: PathBuf, } impl SegmentCache { - pub async fn build_block_index_from_segments( - &mut self, - ) -> StorageResult> { - let segments_dir = self.base_path.join(BlockHeader::FOLDER_NAME); + pub async fn build_block_index_from_segments(&self) -> StorageResult> { + let entries = fs::read_dir(&self.segments_dir)?; let mut block_index = HashMap::new(); - let entries = fs::read_dir(&segments_dir)?; - for entry in entries.flatten() { let name = match entry.file_name().into_string() { Ok(s) => s, @@ -126,20 +108,19 @@ impl SegmentCache { impl SegmentCache { const MAX_ACTIVE_SEGMENTS: usize = 10; - pub async fn load_or_new(base_path: impl Into) -> StorageResult { - let base_path = base_path.into(); - let items_dir = base_path.join(I::FOLDER_NAME); + pub async fn load_or_new(segments_dir: impl Into) -> StorageResult { + let segments_dir = segments_dir.into(); let mut cache = Self { segments: HashMap::with_capacity(Self::MAX_ACTIVE_SEGMENTS), evicted: HashMap::new(), tip_height: None, start_height: None, - base_path, + segments_dir: segments_dir.clone(), }; // Building the metadata - if let Ok(entries) = fs::read_dir(&items_dir) { + if let Ok(entries) = fs::read_dir(&segments_dir) { let mut max_seg_id = None; let mut min_seg_id = None; @@ -205,11 +186,11 @@ impl SegmentCache { pub async fn clear_all(&mut self) -> StorageResult<()> { self.clear_in_memory(); - let persistence_dir = self.base_path.join(I::FOLDER_NAME); - if persistence_dir.exists() { - tokio::fs::remove_dir_all(&persistence_dir).await?; + if self.segments_dir.exists() { + tokio::fs::remove_dir_all(&self.segments_dir).await?; } - tokio::fs::create_dir_all(&persistence_dir).await?; + + tokio::fs::create_dir_all(&self.segments_dir).await?; Ok(()) } @@ -249,7 +230,7 @@ impl SegmentCache { let segment = if let Some(segment) = self.evicted.remove(segment_id) { segment } else { - Segment::load(&self.base_path, *segment_id).await? + Segment::load(&self.segments_dir, *segment_id).await? 
        };
 
        let segment = self.segments.entry(*segment_id).or_insert(segment);
@@ -367,9 +348,10 @@
         Ok(())
     }
 
-    pub async fn persist_evicted(&mut self) {
+    pub async fn persist_evicted(&mut self, segments_dir: impl Into<PathBuf>) {
+        let segments_dir = segments_dir.into();
         for (_, segments) in self.evicted.iter_mut() {
-            if let Err(e) = segments.persist(&self.base_path).await {
+            if let Err(e) = segments.persist(&segments_dir).await {
                 tracing::error!("Failed to persist segment: {}", e);
             }
         }
@@ -377,11 +359,13 @@
         self.evicted.clear();
     }
 
-    pub async fn persist(&mut self) {
-        self.persist_evicted().await;
+    pub async fn persist(&mut self, segments_dir: impl Into<PathBuf>) {
+        let segments_dir = segments_dir.into();
+
+        self.persist_evicted(&segments_dir).await;
 
         for (_, segments) in self.segments.iter_mut() {
-            if let Err(e) = segments.persist(&self.base_path).await {
+            if let Err(e) = segments.persist(&segments_dir).await {
                 tracing::error!("Failed to persist segment: {}", e);
             }
         }
@@ -463,7 +447,7 @@
     pub async fn load(base_path: &Path, segment_id: u32) -> StorageResult<Self> {
         // Load segment from disk
-        let segment_path = base_path.join(I::relative_disk_path(segment_id));
+        let segment_path = base_path.join(I::segment_file_name(segment_id));
 
         let (items, state) = if segment_path.exists() {
             let file = File::open(&segment_path)?;
@@ -497,12 +481,13 @@
         Ok(Self::new(segment_id, items, state))
     }
 
-    pub async fn persist(&mut self, base_path: &Path) -> StorageResult<()> {
+    pub async fn persist(&mut self, segments_dir: impl Into<PathBuf>) -> StorageResult<()> {
         if self.state == SegmentState::Clean {
             return Ok(());
         }
 
-        let path = base_path.join(I::relative_disk_path(self.segment_id));
+        let segments_dir = segments_dir.into();
+        let path = segments_dir.join(I::segment_file_name(self.segment_id));
 
         if let Err(e) = fs::create_dir_all(path.parent().unwrap()) {
             return Err(StorageError::WriteFailed(format!("Failed to persist segment: {}", e)));
@@ -630,7 +615,7 @@ mod tests {
         cache.store_items(&items).await.expect("Failed to store items");
 
-        cache.persist().await;
+        cache.persist(tmp_dir.path()).await;
         cache.clear_in_memory();
 
         assert!(cache.segments.is_empty());

From c3166cf845df4a8f570c323804c7b946a0c97143 Mon Sep 17 00:00:00 2001
From: Borja Castellano
Date: Fri, 26 Dec 2025 19:46:16 +0000
Subject: [PATCH 13/21] using rwlock to allow segmentcache mutability behind
 immutable ref

---
 dash-spv/src/storage/blocks.rs   | 25 ++++++++++++++-----------
 dash-spv/src/storage/filters.rs  | 23 ++++++++++++-----------
 dash-spv/src/storage/segments.rs |  4 +++-
 3 files changed, 29 insertions(+), 23 deletions(-)

diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs
index 7f1e22e6..168e60fc 100644
--- a/dash-spv/src/storage/blocks.rs
+++ b/dash-spv/src/storage/blocks.rs
@@ -7,6 +7,7 @@
 use std::collections::HashMap;
 use std::ops::Range;
 use std::path::PathBuf;
 
 use async_trait::async_trait;
 use dashcore::block::Header as BlockHeader;
 use dashcore::BlockHash;
+use tokio::sync::RwLock;
 
 use crate::error::StorageResult;
 use crate::storage::io::atomic_write;
@@ -47,7 +48,7 @@ pub trait BlockHeaderStorage {
 }
 
 pub struct PersistentBlockHeaderStorage {
-    block_headers: SegmentCache<BlockHeader>,
+    block_headers: RwLock<SegmentCache<BlockHeader>>,
     header_hash_index: HashMap<BlockHash, u32>,
 }
 
@@ -63,7 +64,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage {
 
         let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME);
 
-        let block_headers = SegmentCache::load_or_new(storage_path).await?;
+        let mut block_headers = SegmentCache::load_or_new(storage_path).await?;
let header_hash_index = match tokio::fs::read(&index_path) .await @@ -76,7 +77,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { }; Ok(Self { - block_headers, + block_headers: RwLock::new(block_headers), header_hash_index, }) } @@ -87,7 +88,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { tokio::fs::create_dir_all(&block_headers_folder).await?; - self.block_headers.persist(&block_headers_folder).await; + self.block_headers.write().await.persist(&block_headers_folder).await; let data = bincode::serialize(&self.header_hash_index) .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; @@ -99,7 +100,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { #[async_trait] impl BlockHeaderStorage for PersistentBlockHeaderStorage { async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.next_height(); + let height = self.block_headers.read().await.next_height(); self.store_headers_at_height(headers, height).await } @@ -112,7 +113,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - self.block_headers.store_items_at_height(headers, height).await?; + self.block_headers.write().await.store_items_at_height(headers, height).await?; for hash in hashes { self.header_hash_index.insert(hash, height); @@ -123,7 +124,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn load_headers(&self, range: Range) -> StorageResult> { - self.block_headers.get_items(range).await + self.block_headers.write().await.get_items(range).await } async fn get_header(&self, height: u32) -> StorageResult> { @@ -147,21 +148,23 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn get_tip_height(&self) -> Option { - self.block_headers.tip_height() + self.block_headers.read().await.tip_height() } async fn get_start_height(&self) -> Option { - self.block_headers.start_height() + self.block_headers.read().await.start_height() } async fn get_stored_headers_len(&self) -> u32 { - let start_height = if let Some(start_height) = self.block_headers.start_height() { + let block_headers = self.block_headers.read().await; + + let start_height = if let Some(start_height) = block_headers.start_height() { start_height } else { return 0; }; - let end_height = if let Some(end_height) = self.block_headers.tip_height() { + let end_height = if let Some(end_height) = block_headers.tip_height() { end_height } else { return 0; diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index fa22c4a5..64342686 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -2,6 +2,7 @@ use std::{ops::Range, path::PathBuf}; use async_trait::async_trait; use dashcore::hash_types::FilterHeader; +use tokio::sync::RwLock; use crate::{ error::StorageResult, @@ -33,7 +34,7 @@ pub trait FilterStorage { } pub struct PersistentFilterHeaderStorage { - filter_headers: SegmentCache, + filter_headers: RwLock>, } impl PersistentFilterHeaderStorage { @@ -49,7 +50,7 @@ impl PersistentStorage for PersistentFilterHeaderStorage { let filter_headers = SegmentCache::load_or_new(segments_folder).await?; Ok(Self { - filter_headers, + filter_headers: RwLock::new(filter_headers), }) } @@ -58,7 +59,7 @@ impl PersistentStorage for PersistentFilterHeaderStorage { tokio::fs::create_dir_all(&filter_headers_folder).await?; - self.filter_headers.persist(&filter_headers_folder).await; + 
self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } } @@ -67,17 +68,17 @@ impl PersistentStorage for PersistentFilterHeaderStorage { impl FilterHeaderStorage for PersistentFilterHeaderStorage { /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { - self.filter_headers.store_items(headers).await + self.filter_headers.write().await.store_items(headers).await } /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult> { - self.filter_headers.get_items(range).await + self.filter_headers.write().await.get_items(range).await } /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.get_items(height..height + 1).await?.first().copied()) + Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) } /// Get the current filter tip blockchain height. @@ -87,7 +88,7 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { } pub struct PersistentFilterStorage { - filters: SegmentCache>, + filters: RwLock>>, } impl PersistentFilterStorage { @@ -103,7 +104,7 @@ impl PersistentStorage for PersistentFilterStorage { let filters = SegmentCache::load_or_new(filters_folder).await?; Ok(Self { - filters, + filters: RwLock::new(filters), }) } @@ -113,7 +114,7 @@ impl PersistentStorage for PersistentFilterStorage { tokio::fs::create_dir_all(&filters_folder).await?; - self.filters.persist(&filters_folder).await; + self.filters.write().await.persist(&filters_folder).await; Ok(()) } } @@ -122,11 +123,11 @@ impl PersistentStorage for PersistentFilterStorage { impl FilterStorage for PersistentFilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.store_items_at_height(&[filter.to_vec()], height).await + self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await } /// Load compact filters in the given blockchain height range. 
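This is the heart of patch 13: `SegmentCache` reads may fault segments in from disk, so they need `&mut` access, while the storage traits expose `&self` accessors. Wrapping the cache in `tokio::sync::RwLock` moves the mutability inside the struct. A minimal self-contained sketch of the pattern (the `Cache` type is illustrative, not the crate's `SegmentCache`):

use tokio::sync::RwLock;

struct Cache {
    items: RwLock<Vec<u32>>,
}

impl Cache {
    // A shared reference is enough: the lock provides the mutability
    // needed to pull missing entries into the cache on demand.
    async fn get_or_fill(&self, index: usize) -> u32 {
        let mut items = self.items.write().await;
        while items.len() <= index {
            let next = items.len() as u32; // stand-in for loading a segment
            items.push(next);
        }
        items[index]
    }
}

#[tokio::main]
async fn main() {
    let cache = Cache { items: RwLock::new(Vec::new()) };
    assert_eq!(cache.get_or_fill(3).await, 3);
}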
async fn load_filters(&self, range: Range) -> StorageResult>> { - self.filters.get_items(range).await + self.filters.write().await.get_items(range).await } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index ada4e7bf..30f01938 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -66,7 +66,9 @@ pub struct SegmentCache { } impl SegmentCache { - pub async fn build_block_index_from_segments(&self) -> StorageResult> { + pub async fn build_block_index_from_segments( + &mut self, + ) -> StorageResult> { let entries = fs::read_dir(&self.segments_dir)?; let mut block_index = HashMap::new(); From db519b12753035fe38b320201f35806bbc518e6b Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 20:03:24 +0000 Subject: [PATCH 14/21] clear method fixed --- dash-spv/src/storage/blocks.rs | 12 +++++++ dash-spv/src/storage/filters.rs | 24 +++++++++++++ dash-spv/src/storage/mod.rs | 63 ++++++++++++++++++--------------- 3 files changed, 70 insertions(+), 29 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 168e60fc..0053d9a1 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -95,6 +95,18 @@ impl PersistentStorage for PersistentBlockHeaderStorage { atomic_write(&index_path, &data).await } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&block_headers_folder).await?; + + self.block_headers.write().await.persist_evicted(&block_headers_folder).await; + Ok(()) + } } #[async_trait] diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 64342686..9a6ca999 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -62,6 +62,18 @@ impl PersistentStorage for PersistentFilterHeaderStorage { self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let filter_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&filter_headers_folder).await?; + + self.filter_headers.write().await.persist_evicted(&filter_headers_folder).await; + Ok(()) + } } #[async_trait] @@ -117,6 +129,18 @@ impl PersistentStorage for PersistentFilterStorage { self.filters.write().await.persist(&filters_folder).await; Ok(()) } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let filters_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&filters_folder).await?; + + self.filters.write().await.persist_evicted(&filters_folder).await; + Ok(()) + } } #[async_trait] diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 1ba66e75..04cf6ee6 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -26,7 +26,6 @@ use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterSto use crate::storage::lockfile::LockFile; use crate::storage::metadata::PersistentMetadataStorage; use crate::storage::transactions::PersistentTransactionStorage; -use crate::StorageError; pub use types::*; @@ -59,7 +58,7 @@ pub trait StorageManager: /// Disk-based storage manager with segmented files and async background saving. 
pub struct DiskStorageManager { - base_path: PathBuf, + storage_path: PathBuf, block_headers: Arc>, filter_headers: Arc>, @@ -76,33 +75,34 @@ pub struct DiskStorageManager { } impl DiskStorageManager { - pub async fn new(base_path: impl Into + Send) -> StorageResult { + pub async fn new(storage_path: impl Into + Send) -> StorageResult { use std::fs; - let base_path = base_path.into(); + let storage_path = storage_path.into(); // Create directories if they don't exist - fs::create_dir_all(&base_path) - .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; + fs::create_dir_all(&storage_path)?; // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(base_path.join(".lock"))?; + let lock_file = LockFile::new(storage_path.with_added_extension(".lock"))?; let mut storage = Self { - base_path: base_path.clone(), + storage_path: storage_path.clone(), block_headers: Arc::new(RwLock::new( - PersistentBlockHeaderStorage::load(&base_path).await?, + PersistentBlockHeaderStorage::load(&storage_path).await?, )), filter_headers: Arc::new(RwLock::new( - PersistentFilterHeaderStorage::load(&base_path).await?, + PersistentFilterHeaderStorage::load(&storage_path).await?, )), - filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&base_path).await?)), + filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&storage_path).await?)), transactions: Arc::new(RwLock::new( - PersistentTransactionStorage::load(&base_path).await?, + PersistentTransactionStorage::load(&storage_path).await?, + )), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&storage_path).await?)), + chainstate: Arc::new(RwLock::new( + PersistentChainStateStorage::load(&storage_path).await?, )), - metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&base_path).await?)), - chainstate: Arc::new(RwLock::new(PersistentChainStateStorage::load(&base_path).await?)), worker_handle: None, @@ -133,7 +133,7 @@ impl DiskStorageManager { let metadata = Arc::clone(&self.metadata); let chainstate = Arc::clone(&self.chainstate); - let storage_path = self.base_path.clone(); + let storage_path = self.storage_path.clone(); let worker_handle = tokio::spawn(async move { let mut ticker = tokio::time::interval(Duration::from_secs(5)); @@ -180,19 +180,10 @@ impl DiskStorageManager { // First, stop the background worker to avoid races with file deletion self.stop_worker(); - // Clear in-memory state - self.block_headers.write().await.clear_in_memory(); - self.filter_headers.write().await.clear_in_memory(); - self.filters.write().await.clear_in_memory(); - - self.header_hash_index.write().await.clear(); - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - // Remove all files and directories under base_path - if self.base_path.exists() { + if self.storage_path.exists() { // Best-effort removal; if concurrent files appear, retry once - match tokio::fs::remove_dir_all(&self.base_path).await { + match tokio::fs::remove_dir_all(&self.storage_path).await { Ok(_) => {} Err(e) => { // Retry once after a short delay to handle transient races @@ -200,15 +191,29 @@ impl DiskStorageManager { || e.kind() == std::io::ErrorKind::DirectoryNotEmpty { tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.base_path).await?; + tokio::fs::remove_dir_all(&self.storage_path).await?; } else { return Err(crate::error::StorageError::Io(e)); } } } - tokio::fs::create_dir_all(&self.base_path).await?; + 
tokio::fs::create_dir_all(&self.storage_path).await?; } + // Instantiate storages again once persisted data has been cleared + let storage_path = &self.storage_path; + + self.block_headers = + Arc::new(RwLock::new(PersistentBlockHeaderStorage::load(storage_path).await?)); + self.filter_headers = + Arc::new(RwLock::new(PersistentFilterHeaderStorage::load(storage_path).await?)); + self.filters = Arc::new(RwLock::new(PersistentFilterStorage::load(storage_path).await?)); + self.transactions = + Arc::new(RwLock::new(PersistentTransactionStorage::load(storage_path).await?)); + self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::load(storage_path).await?)); + self.chainstate = + Arc::new(RwLock::new(PersistentChainStateStorage::load(storage_path).await?)); + // Restart the background worker for future operations self.start_worker().await; @@ -224,7 +229,7 @@ impl DiskStorageManager { } async fn persist(&self) { - let storage_path = &self.base_path; + let storage_path = &self.storage_path; let _ = self.block_headers.write().await.persist(storage_path).await; let _ = self.filter_headers.write().await.persist(storage_path).await; From e1924ef3a1964ee690b2e064761095292a0afc74 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 21:11:04 +0000 Subject: [PATCH 15/21] default method implementations in storage traits --- dash-spv/src/storage/blocks.rs | 40 ++++++++++++++++----------------- dash-spv/src/storage/filters.rs | 25 ++++++++++++++++----- dash-spv/src/storage/mod.rs | 4 +++- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 0053d9a1..cc921367 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -31,7 +31,25 @@ pub trait BlockHeaderStorage { async fn load_headers(&self, range: Range) -> StorageResult>; /// Get a specific header by blockchain height. - async fn get_header(&self, height: u32) -> StorageResult>; + async fn get_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_tip_height().await { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_start_height().await { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + + Ok(self.load_headers(height..height + 1).await?.first().copied()) + } /// Get the current tip blockchain height. async fn get_tip_height(&self) -> Option; @@ -139,26 +157,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { self.block_headers.write().await.get_items(range).await } - async fn get_header(&self, height: u32) -> StorageResult> { - if let Some(tip_height) = self.get_tip_height().await { - if height > tip_height { - return Ok(None); - } - } else { - return Ok(None); - } - - if let Some(start_height) = self.get_start_height().await { - if height < start_height { - return Ok(None); - } - } else { - return Ok(None); - } - - Ok(self.load_headers(height..height + 1).await?.first().copied()) - } - async fn get_tip_height(&self) -> Option { self.block_headers.read().await.tip_height() } diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 9a6ca999..5e3aaa85 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -18,7 +18,25 @@ pub trait FilterHeaderStorage { async fn load_filter_headers(&self, range: Range) -> StorageResult>; /// Get a specific filter header by blockchain height. 
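
The hunk below applies the same refactor to get_filter_header that get_header received above: the point lookup becomes a default trait method that folds in the start/tip bounds checks, so implementors only supply the primitive range loader plus the height accessors. A compact sketch of the pattern, synchronous and with a toy Item type purely for illustration:

    type Item = u64; // stands in for BlockHeader / FilterHeader

    trait ItemStorage {
        fn load_range(&self, range: std::ops::Range<u32>) -> Vec<Item>;
        fn tip_height(&self) -> Option<u32>;
        fn start_height(&self) -> Option<u32>;

        // Default method: bounds-check against start/tip, then reuse load_range,
        // so every implementor gets a correct point lookup for free.
        fn get(&self, height: u32) -> Option<Item> {
            let tip = self.tip_height()?;
            let start = self.start_height()?;
            if height < start || height > tip {
                return None;
            }
            self.load_range(height..height + 1).first().copied()
        }
    }

    struct VecStore {
        base: u32,
        items: Vec<Item>,
    }

    impl ItemStorage for VecStore {
        fn load_range(&self, range: std::ops::Range<u32>) -> Vec<Item> {
            let lo = (range.start - self.base) as usize;
            let hi = (range.end - self.base) as usize;
            self.items[lo..hi].to_vec()
        }
        fn tip_height(&self) -> Option<u32> {
            (!self.items.is_empty()).then(|| self.base + self.items.len() as u32 - 1)
        }
        fn start_height(&self) -> Option<u32> {
            (!self.items.is_empty()).then_some(self.base)
        }
    }

    fn main() {
        let store = VecStore { base: 10, items: vec![100, 101, 102] };
        assert_eq!(store.get(11), Some(101));
        assert_eq!(store.get(9), None); // below start: no panic, no disk hit
        assert_eq!(store.get(13), None); // above tip
    }

Because the default method returns early for out-of-range heights, implementors never see a request outside the stored range.
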
- async fn get_filter_header(&self, height: u32) -> StorageResult>; + async fn get_filter_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_filter_tip_height().await? { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_filter_tip_height().await? { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + + Ok(self.load_filter_headers(height..height + 1).await?.first().copied()) + } /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; @@ -88,11 +106,6 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { self.filter_headers.write().await.get_items(range).await } - /// Get a specific filter header by blockchain height. - async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 04cf6ee6..2f393b08 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -242,8 +242,10 @@ impl DiskStorageManager { #[cfg(test)] mod tests { + use crate::ChainState; + use super::*; - use dashcore::{block::Version, pow::CompactTarget}; + use dashcore::{block::Version, pow::CompactTarget, BlockHash, Header as BlockHeader}; use dashcore_hashes::Hash; use tempfile::TempDir; From b8850b2c3eb6c73938114c95a81594e8e2df491b Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 17:50:55 +0000 Subject: [PATCH 16/21] storage manager trait implemented --- dash-spv/examples/filter_sync.rs | 3 +- dash-spv/examples/simple_sync.rs | 3 +- dash-spv/examples/spv_with_wallet.rs | 3 +- dash-spv/src/client/block_processor_test.rs | 2 +- dash-spv/src/client/lifecycle.rs | 2 +- dash-spv/src/storage/filters.rs | 5 - dash-spv/src/storage/mod.rs | 192 ++++++++++++++++--- dash-spv/src/storage/segments.rs | 29 +-- dash-spv/src/storage/transactions.rs | 6 - dash-spv/tests/header_sync_test.rs | 23 ++- dash-spv/tests/integration_real_node_test.rs | 20 +- dash-spv/tests/peer_test.rs | 2 +- dash-spv/tests/reverse_index_test.rs | 4 +- dash-spv/tests/segmented_storage_debug.rs | 2 +- dash-spv/tests/segmented_storage_test.rs | 5 +- dash-spv/tests/simple_header_test.rs | 9 +- dash-spv/tests/simple_segmented_test.rs | 2 +- dash-spv/tests/storage_consistency_test.rs | 2 +- dash-spv/tests/storage_test.rs | 5 + dash-spv/tests/wallet_integration_test.rs | 2 +- 20 files changed, 224 insertions(+), 97 deletions(-) diff --git a/dash-spv/examples/filter_sync.rs b/dash-spv/examples/filter_sync.rs index b1821d43..6d0e7a39 100644 --- a/dash-spv/examples/filter_sync.rs +++ b/dash-spv/examples/filter_sync.rs @@ -28,8 +28,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - let storage_manager = - DiskStorageManager::new("./.tmp/filter-sync-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/filter-sync-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); diff --git a/dash-spv/examples/simple_sync.rs b/dash-spv/examples/simple_sync.rs index 920c8fca..57e266fa 100644 --- a/dash-spv/examples/simple_sync.rs 
+++ b/dash-spv/examples/simple_sync.rs @@ -24,8 +24,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - let storage_manager = - DiskStorageManager::new("./.tmp/simple-sync-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/simple-sync-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); diff --git a/dash-spv/examples/spv_with_wallet.rs b/dash-spv/examples/spv_with_wallet.rs index dc391f2e..0a15e6d6 100644 --- a/dash-spv/examples/spv_with_wallet.rs +++ b/dash-spv/examples/spv_with_wallet.rs @@ -26,8 +26,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - use disk storage for persistence - let storage_manager = - DiskStorageManager::new("./.tmp/spv-with-wallet-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/spv-with-wallet-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); diff --git a/dash-spv/src/client/block_processor_test.rs b/dash-spv/src/client/block_processor_test.rs index a8330a2d..5ec060ac 100644 --- a/dash-spv/src/client/block_processor_test.rs +++ b/dash-spv/src/client/block_processor_test.rs @@ -4,7 +4,7 @@ mod tests { use crate::client::block_processor::{BlockProcessingTask, BlockProcessor}; - use crate::storage::DiskStorageManager; + use crate::storage::{BlockHeaderStorage, DiskStorageManager}; use crate::types::{SpvEvent, SpvStats}; use dashcore::{blockdata::constants::genesis_block, Block, Network, Transaction}; diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 6b617bcd..4aefd9ec 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -219,7 +219,7 @@ impl DashSpvClient StorageResult<()> { self.filter_headers.write().await.store_items(headers).await } - /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult> { self.filter_headers.write().await.get_items(range).await } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) } @@ -158,12 +155,10 @@ impl PersistentStorage for PersistentFilterStorage { #[async_trait] impl FilterStorage for PersistentFilterStorage { - /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await } - /// Load compact filters in the given blockchain height range. 
async fn load_filters(&self, range: Range) -> StorageResult>> { self.filters.write().await.get_items(range).await } diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 2f393b08..711b69ed 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -14,6 +14,10 @@ mod segments; mod transactions; use async_trait::async_trait; +use dashcore::hash_types::FilterHeader; +use dashcore::{Header as BlockHeader, Txid}; +use std::collections::HashMap; +use std::ops::Range; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -24,8 +28,19 @@ use crate::storage::blocks::PersistentBlockHeaderStorage; use crate::storage::chainstate::PersistentChainStateStorage; use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; use crate::storage::lockfile::LockFile; +use crate::storage::masternode::PersistentMasternodeStateStorage; use crate::storage::metadata::PersistentMetadataStorage; use crate::storage::transactions::PersistentTransactionStorage; +use crate::types::{MempoolState, UnconfirmedTransaction}; +use crate::ChainState; + +pub use crate::storage::blocks::BlockHeaderStorage; +pub use crate::storage::chainstate::ChainStateStorage; +pub use crate::storage::filters::FilterHeaderStorage; +pub use crate::storage::filters::FilterStorage; +pub use crate::storage::masternode::MasternodeStateStorage; +pub use crate::storage::metadata::MetadataStorage; +pub use crate::storage::transactions::TransactionStorage; pub use types::*; @@ -54,6 +69,8 @@ pub trait StorageManager: + Send + Sync { + async fn clear(&mut self) -> StorageResult<()>; + async fn shutdown(&mut self); } /// Disk-based storage manager with segmented files and async background saving. @@ -66,6 +83,7 @@ pub struct DiskStorageManager { transactions: Arc>, metadata: Arc>, chainstate: Arc>, + masternodestate: Arc>, // Background worker worker_handle: Option>, @@ -103,6 +121,9 @@ impl DiskStorageManager { chainstate: Arc::new(RwLock::new( PersistentChainStateStorage::load(&storage_path).await?, )), + masternodestate: Arc::new(RwLock::new( + PersistentMasternodeStateStorage::load(&storage_path).await?, + )), worker_handle: None, @@ -160,23 +181,21 @@ impl DiskStorageManager { } } - /// Clear all filter headers and compact filters. - pub(super) async fn clear_filters(&mut self) -> StorageResult<()> { - // Stop worker to prevent concurrent writes to filter directories - self.stop_worker().await; - - // Clear in-memory and on-disk filter headers segments - self.filter_headers.write().await.clear_all().await?; - self.filters.write().await.clear_all().await?; - - // Restart background worker for future operations - self.start_worker().await; + async fn persist(&self) { + let storage_path = &self.storage_path; - Ok(()) + let _ = self.block_headers.write().await.persist(storage_path).await; + let _ = self.filter_headers.write().await.persist(storage_path).await; + let _ = self.filters.write().await.persist(storage_path).await; + let _ = self.transactions.write().await.persist(storage_path).await; + let _ = self.metadata.write().await.persist(storage_path).await; + let _ = self.chainstate.write().await.persist(storage_path).await; } - - /// Clear all storage. 
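
With clear and shutdown promoted onto StorageManager above, and the seven storage traits bundled as its supertraits, downstream code can name a single bound. A sketch of the calling side, assuming the trait methods and the crate-root StorageError re-export shown elsewhere in this series:

    use dash_spv::storage::StorageManager;
    use dash_spv::StorageError;

    // A single bound brings in header, filter, mempool, metadata, chain-state
    // and masternode-state storage, plus clear/shutdown.
    async fn reset_storage<S: StorageManager>(storage: &mut S) -> Result<(), StorageError> {
        storage.clear().await?; // wipes on-disk and in-memory data
        Ok(())
    }
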
- pub async fn clear(&mut self) -> StorageResult<()> { +} + +#[async_trait] +impl StorageManager for DiskStorageManager { + async fn clear(&mut self) -> StorageResult<()> { // First, stop the background worker to avoid races with file deletion self.stop_worker(); @@ -220,23 +239,150 @@ impl DiskStorageManager { Ok(()) } +<<<<<<< HEAD /// Shutdown the storage manager. pub async fn shutdown(&mut self) { +======= + async fn shutdown(&mut self) { +>>>>>>> f40a2bd9 (storage manager trait implemented) self.stop_worker(); // Persist all dirty data self.persist().await; } +} - async fn persist(&self) { - let storage_path = &self.storage_path; +#[async_trait] +impl blocks::BlockHeaderStorage for DiskStorageManager { + async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { + self.block_headers.write().await.store_headers(headers).await + } - let _ = self.block_headers.write().await.persist(storage_path).await; - let _ = self.filter_headers.write().await.persist(storage_path).await; - let _ = self.filters.write().await.persist(storage_path).await; - let _ = self.transactions.write().await.persist(storage_path).await; - let _ = self.metadata.write().await.persist(storage_path).await; - let _ = self.chainstate.write().await.persist(storage_path).await; + async fn store_headers_at_height( + &mut self, + headers: &[BlockHeader], + height: u32, + ) -> StorageResult<()> { + self.block_headers.write().await.store_headers_at_height(headers, height).await + } + + async fn load_headers(&self, range: Range) -> StorageResult> { + self.block_headers.write().await.load_headers(range).await + } + + async fn get_tip_height(&self) -> Option { + self.block_headers.read().await.get_tip_height().await + } + + async fn get_start_height(&self) -> Option { + self.block_headers.read().await.get_start_height().await + } + + async fn get_stored_headers_len(&self) -> u32 { + self.block_headers.read().await.get_stored_headers_len().await + } + + /// Get header height by block hash (reverse lookup). 
+ async fn get_header_height_by_hash( + &self, + hash: &dashcore::BlockHash, + ) -> StorageResult> { + self.block_headers.read().await.get_header_height_by_hash(hash).await + } +} + +#[async_trait] +impl filters::FilterHeaderStorage for DiskStorageManager { + async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { + self.filter_headers.write().await.store_filter_headers(headers).await + } + + async fn load_filter_headers(&self, range: Range) -> StorageResult> { + self.filter_headers.write().await.load_filter_headers(range).await + } + + async fn get_filter_tip_height(&self) -> StorageResult> { + self.filter_headers.read().await.get_filter_tip_height().await + } +} + +#[async_trait] +impl filters::FilterStorage for DiskStorageManager { + async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { + self.filters.write().await.store_filter(height, filter).await + } + + async fn load_filters(&self, range: Range) -> StorageResult>> { + self.filters.write().await.load_filters(range).await + } +} + +#[async_trait] +impl transactions::TransactionStorage for DiskStorageManager { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()> { + self.transactions.write().await.store_mempool_transaction(txid, tx).await + } + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { + self.transactions.write().await.remove_mempool_transaction(txid).await + } + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult> { + self.transactions.read().await.get_mempool_transaction(txid).await + } + + async fn get_all_mempool_transactions( + &self, + ) -> StorageResult> { + self.transactions.read().await.get_all_mempool_transactions().await + } + + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { + self.transactions.write().await.store_mempool_state(state).await + } + + async fn load_mempool_state(&self) -> StorageResult> { + self.transactions.read().await.load_mempool_state().await + } +} + +#[async_trait] +impl metadata::MetadataStorage for DiskStorageManager { + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { + self.metadata.write().await.store_metadata(key, value).await + } + + async fn load_metadata(&self, key: &str) -> StorageResult>> { + self.metadata.read().await.load_metadata(key).await + } +} + +#[async_trait] +impl chainstate::ChainStateStorage for DiskStorageManager { + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { + self.chainstate.write().await.store_chain_state(state).await + } + + async fn load_chain_state(&self) -> StorageResult> { + self.chainstate.read().await.load_chain_state().await + } +} + +#[async_trait] +impl masternode::MasternodeStateStorage for DiskStorageManager { + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { + self.masternodestate.write().await.store_masternode_state(state).await + } + + async fn load_masternode_state(&self) -> StorageResult> { + self.masternodestate.read().await.load_masternode_state().await } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 30f01938..59ef43c2 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -179,24 +179,6 @@ impl SegmentCache { height % Segment::::ITEMS_PER_SEGMENT } - pub fn clear_in_memory(&mut self) { - self.segments.clear(); - 
self.evicted.clear(); - self.tip_height = None; - } - - pub async fn clear_all(&mut self) -> StorageResult<()> { - self.clear_in_memory(); - - if self.segments_dir.exists() { - tokio::fs::remove_dir_all(&self.segments_dir).await?; - } - - tokio::fs::create_dir_all(&self.segments_dir).await?; - - Ok(()) - } - async fn get_segment(&mut self, segment_id: &u32) -> StorageResult<&Segment> { let segment = self.get_segment_mut(segment_id).await?; Ok(&*segment) @@ -619,10 +601,13 @@ mod tests { cache.persist(tmp_dir.path()).await; - cache.clear_in_memory(); + let mut cache = SegmentCache::::load_or_new(tmp_dir.path()) + .await + .expect("Failed to load new segment_cache"); assert!(cache.segments.is_empty()); assert!(cache.evicted.is_empty()); +<<<<<<< HEAD let recovered_items = cache.get_items(0..10).await.expect("Failed to load items"); assert_eq!(recovered_items, items); @@ -636,6 +621,12 @@ mod tests { assert!(segment.first_valid_offset().is_none()); assert!(segment.last_valid_offset().is_none()); assert_eq!(segment.state, SegmentState::Dirty); +======= + assert_eq!( + cache.get_items(10..20).await.expect("Failed to get items from segment cache"), + items + ); +>>>>>>> f40a2bd9 (storage manager trait implemented) } #[tokio::test] diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs index d16e5b55..67baaf4b 100644 --- a/dash-spv/src/storage/transactions.rs +++ b/dash-spv/src/storage/transactions.rs @@ -27,10 +27,7 @@ pub trait TransactionStorage { async fn get_all_mempool_transactions( &self, ) -> StorageResult>; -} -#[async_trait] -pub trait MempoolStateStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; async fn load_mempool_state(&self) -> StorageResult>; @@ -87,10 +84,7 @@ impl TransactionStorage for PersistentTransactionStorage { ) -> StorageResult> { Ok(self.mempool_transactions.clone()) } -} -#[async_trait] -impl MempoolStateStorage for PersistentTransactionStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { self.mempool_state = Some(state.clone()); Ok(()) diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index b5a79564..8cfa3e47 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -5,7 +5,7 @@ use std::time::Duration; use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::PeerNetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, ChainStateStorage, DiskStorageManager}, sync::{HeaderSyncManager, ReorgConfig}, types::{ChainState, ValidationMode}, }; @@ -25,7 +25,7 @@ async fn test_basic_header_sync_from_genesis() { // Create fresh storage starting from empty state let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -48,7 +48,7 @@ async fn test_header_sync_continuation() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -83,7 +83,7 @@ async fn test_header_batch_processing() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + 
DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -133,7 +133,7 @@ async fn test_header_sync_edge_cases() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -171,7 +171,7 @@ async fn test_header_chain_validation() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -209,7 +209,7 @@ async fn test_header_sync_performance() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -273,7 +273,7 @@ async fn test_header_sync_with_client_integration() { // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -329,7 +329,7 @@ async fn test_header_storage_consistency() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -365,9 +365,8 @@ async fn test_header_storage_consistency() { #[tokio::test] async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Failed to create storage"); + let mut storage = + DiskStorageManager::new(temp_dir.path()).await.expect("Failed to create storage"); let headers = create_test_header_chain(header_count); let expected_tip_hash = headers.last().unwrap().block_hash(); diff --git a/dash-spv/tests/integration_real_node_test.rs b/dash-spv/tests/integration_real_node_test.rs index f493a7ab..bb071e04 100644 --- a/dash-spv/tests/integration_real_node_test.rs +++ b/dash-spv/tests/integration_real_node_test.rs @@ -6,10 +6,11 @@ use std::net::SocketAddr; use std::time::{Duration, Instant}; +use dash_spv::storage::BlockHeaderStorage; use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::{NetworkManager, PeerNetworkManager}, - storage::{DiskStorageManager, StorageManager}, + storage::DiskStorageManager, types::ValidationMode, }; use dashcore::Network; @@ -36,8 +37,7 @@ async fn create_test_client( // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await?; + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()).await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); @@ -200,10 +200,9 @@ async fn test_real_header_sync_up_to_10k() { config.peers.push(peer_addr); // Create fresh storage and client - let storage = - 
DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); // Verify starting from empty state assert_eq!(storage.get_tip_height().await, None); @@ -414,10 +413,9 @@ async fn test_real_header_chain_continuity() { config.peers.push(peer_addr); - let storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); let mut client = create_test_client(config).await.expect("Failed to create SPV client"); diff --git a/dash-spv/tests/peer_test.rs b/dash-spv/tests/peer_test.rs index f7fdc24e..4868293f 100644 --- a/dash-spv/tests/peer_test.rs +++ b/dash-spv/tests/peer_test.rs @@ -190,7 +190,7 @@ async fn test_max_peer_limit() { // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); diff --git a/dash-spv/tests/reverse_index_test.rs b/dash-spv/tests/reverse_index_test.rs index 2b161641..e09d3097 100644 --- a/dash-spv/tests/reverse_index_test.rs +++ b/dash-spv/tests/reverse_index_test.rs @@ -1,4 +1,4 @@ -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::Header as BlockHeader; use dashcore_hashes::Hash; use std::path::PathBuf; @@ -49,7 +49,7 @@ async fn test_reverse_index_disk_storage() { #[tokio::test] async fn test_clear_clears_index() { let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); diff --git a/dash-spv/tests/segmented_storage_debug.rs b/dash-spv/tests/segmented_storage_debug.rs index a26bec77..1b10dd97 100644 --- a/dash-spv/tests/segmented_storage_debug.rs +++ b/dash-spv/tests/segmented_storage_debug.rs @@ -1,6 +1,6 @@ //! Debug test for segmented storage. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/segmented_storage_test.rs b/dash-spv/tests/segmented_storage_test.rs index a9bcf491..ebdce3b1 100644 --- a/dash-spv/tests/segmented_storage_test.rs +++ b/dash-spv/tests/segmented_storage_test.rs @@ -1,6 +1,9 @@ //! Tests for segmented disk storage implementation. 
-use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{ + BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage, FilterStorage, MetadataStorage, + StorageManager, +}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::hash_types::FilterHeader; use dashcore::pow::CompactTarget; diff --git a/dash-spv/tests/simple_header_test.rs b/dash-spv/tests/simple_header_test.rs index 26c46d06..2676dcb5 100644 --- a/dash-spv/tests/simple_header_test.rs +++ b/dash-spv/tests/simple_header_test.rs @@ -3,7 +3,7 @@ use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::PeerNetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, DiskStorageManager}, types::ValidationMode, }; use dashcore::Network; @@ -51,10 +51,9 @@ async fn test_simple_header_sync() { config.peers.push(peer_addr); // Create fresh storage - let storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); // Verify starting from empty state assert_eq!(storage.get_tip_height().await, None); diff --git a/dash-spv/tests/simple_segmented_test.rs b/dash-spv/tests/simple_segmented_test.rs index 327c0877..9cea06a3 100644 --- a/dash-spv/tests/simple_segmented_test.rs +++ b/dash-spv/tests/simple_segmented_test.rs @@ -1,6 +1,6 @@ //! Simple test without background saving. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/storage_consistency_test.rs b/dash-spv/tests/storage_consistency_test.rs index a5640bf7..cdd16644 100644 --- a/dash-spv/tests/storage_consistency_test.rs +++ b/dash-spv/tests/storage_consistency_test.rs @@ -3,7 +3,7 @@ //! These tests are designed to expose the storage bug where get_tip_height() //! returns a value but get_header() at that height returns None. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 89db5da2..1d4374f0 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -1,9 +1,14 @@ //! Integration tests for storage layer functionality. 
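
The recurring one-line import change across these test files is a direct consequence of the trait split: methods such as get_tip_height now live on BlockHeaderStorage, and Rust only resolves trait methods when the trait is in scope. A sketch of the calling side under the signatures shown in this patch:

    use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager};

    // Without `BlockHeaderStorage` in scope, this call no longer resolves,
    // even though DiskStorageManager implements the trait.
    async fn print_tip(storage: &DiskStorageManager) {
        match storage.get_tip_height().await {
            Some(tip) => println!("header tip at height {tip}"),
            None => println!("no headers stored yet"),
        }
    }
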
+<<<<<<< HEAD use dash_spv::{ storage::{DiskStorageManager, StorageManager}, StorageError, }; +======= +use dash_spv::error::StorageError; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; +>>>>>>> f40a2bd9 (storage manager trait implemented) use dashcore::{block::Header as BlockHeader, block::Version}; use dashcore_hashes::Hash; use tempfile::TempDir; diff --git a/dash-spv/tests/wallet_integration_test.rs b/dash-spv/tests/wallet_integration_test.rs index a13b5b57..8dd8d5c1 100644 --- a/dash-spv/tests/wallet_integration_test.rs +++ b/dash-spv/tests/wallet_integration_test.rs @@ -22,7 +22,7 @@ async fn create_test_client( // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); From b5fedeb89a263b5e4387e7a1b59072c1a6d8192b Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 20:23:35 +0000 Subject: [PATCH 17/21] fixed code to pass the tests --- dash-spv/src/lib.rs | 4 ++-- dash-spv/src/storage/blocks.rs | 16 +++++++++++----- dash-spv/src/storage/filters.rs | 8 +++++++- dash-spv/src/storage/mod.rs | 11 ++++++++++- dash-spv/src/storage/segments.rs | 2 -- dash-spv/tests/storage_test.rs | 11 +++++++++-- 6 files changed, 39 insertions(+), 13 deletions(-) diff --git a/dash-spv/src/lib.rs b/dash-spv/src/lib.rs index aea0d45d..29180781 100644 --- a/dash-spv/src/lib.rs +++ b/dash-spv/src/lib.rs @@ -30,8 +30,8 @@ //! //! // Create the required components //! let network = PeerNetworkManager::new(&config).await?; -//! let storage = DiskStorageManager::new("./.tmp/example-storage".into()).await?; -//! let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network))); +//! let storage = DiskStorageManager::new("./.tmp/example-storage").await?; +//! let wallet = Arc::new(RwLock::new(WalletManager::::new())); //! //! // Create and start the client //! let mut client = DashSpvClient::new(config.clone(), network, storage, wallet).await?; diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index cc921367..eef04917 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -79,19 +79,25 @@ impl PersistentBlockHeaderStorage { impl PersistentStorage for PersistentBlockHeaderStorage { async fn load(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); + let segments_folder = storage_path.join(Self::FOLDER_NAME); - let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME); + let index_path = segments_folder.join(Self::INDEX_FILE_NAME); - let mut block_headers = SegmentCache::load_or_new(storage_path).await?; + let mut block_headers = SegmentCache::load_or_new(&segments_folder).await?; let header_hash_index = match tokio::fs::read(&index_path) .await .ok() - .map(|content| bincode::deserialize(&content).ok()) - .flatten() + .and_then(|content| bincode::deserialize(&content).ok()) { Some(index) => index, - _ => block_headers.build_block_index_from_segments().await?, + _ => { + if segments_folder.exists() { + block_headers.build_block_index_from_segments().await? 
+ } else { + HashMap::new() + } + } }; Ok(Self { diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 80e9467e..15a1473d 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -27,7 +27,7 @@ pub trait FilterHeaderStorage { return Ok(None); } - if let Some(start_height) = self.get_filter_tip_height().await? { + if let Some(start_height) = self.get_filter_start_height().await { if height < start_height { return Ok(None); } @@ -40,6 +40,8 @@ pub trait FilterHeaderStorage { /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; + + async fn get_filter_start_height(&self) -> Option; } #[async_trait] @@ -107,6 +109,10 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) } + + async fn get_filter_start_height(&self) -> Option { + self.filter_headers.read().await.start_height() + } } pub struct PersistentFilterStorage { diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 711b69ed..aa471717 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -97,12 +97,17 @@ impl DiskStorageManager { use std::fs; let storage_path = storage_path.into(); + let lock_file = { + let mut lock_file = storage_path.clone(); + lock_file.set_extension("lock"); + lock_file + }; // Create directories if they don't exist fs::create_dir_all(&storage_path)?; // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(storage_path.with_added_extension(".lock"))?; + let lock_file = LockFile::new(lock_file)?; let mut storage = Self { storage_path: storage_path.clone(), @@ -304,6 +309,10 @@ impl filters::FilterHeaderStorage for DiskStorageManager { async fn get_filter_tip_height(&self) -> StorageResult> { self.filter_headers.read().await.get_filter_tip_height().await } + + async fn get_filter_start_height(&self) -> Option { + self.filter_headers.read().await.get_filter_start_height().await + } } #[async_trait] diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 59ef43c2..0cc65e25 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -604,8 +604,6 @@ mod tests { let mut cache = SegmentCache::::load_or_new(tmp_dir.path()) .await .expect("Failed to load new segment_cache"); - assert!(cache.segments.is_empty()); - assert!(cache.evicted.is_empty()); <<<<<<< HEAD let recovered_items = cache.get_items(0..10).await.expect("Failed to load items"); diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 1d4374f0..0d988a40 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -93,12 +93,19 @@ async fn test_disk_storage_concurrent_access_blocked() { async fn test_disk_storage_lock_file_lifecycle() { let temp_dir = TempDir::new().expect("Failed to create temp directory"); let path = temp_dir.path().to_path_buf(); - let lock_path = path.join(".lock"); + let lock_path = { + let mut lock_file = path.clone(); + lock_file.set_extension("lock"); + lock_file + }; // Lock file created when storage opens { - let _storage = DiskStorageManager::new(path.clone()).await.unwrap(); + let mut storage = DiskStorageManager::new(path.clone()).await.unwrap(); assert!(lock_path.exists(), "Lock file should exist while storage is open"); + + storage.clear().await.expect("Failed to clear the storage"); + assert!(lock_path.exists(), "Lock file 
should exist after storage is cleared"); } // Lock file removed when storage drops From 99c086ad968eb26c4f928c5266c38eca602ddcf9 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 21:30:17 +0000 Subject: [PATCH 18/21] storage documentation updated --- dash-spv/src/storage/blocks.rs | 21 +----- dash-spv/src/storage/chainstate.rs | 2 +- dash-spv/src/storage/filters.rs | 34 +-------- dash-spv/src/storage/masternode.rs | 2 +- dash-spv/src/storage/metadata.rs | 2 +- dash-spv/src/storage/mod.rs | 105 ++++++++++++--------------- dash-spv/src/storage/segments.rs | 20 ++--- dash-spv/src/storage/transactions.rs | 2 +- 8 files changed, 61 insertions(+), 127 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index eef04917..430cb17e 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -17,20 +17,16 @@ use crate::StorageError; #[async_trait] pub trait BlockHeaderStorage { - /// Store block headers. async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; - /// Store block headers. async fn store_headers_at_height( &mut self, headers: &[BlockHeader], height: u32, ) -> StorageResult<()>; - /// Load block headers in the given range. async fn load_headers(&self, range: Range) -> StorageResult>; - /// Get a specific header by blockchain height. async fn get_header(&self, height: u32) -> StorageResult> { if let Some(tip_height) = self.get_tip_height().await { if height > tip_height { @@ -51,14 +47,12 @@ pub trait BlockHeaderStorage { Ok(self.load_headers(height..height + 1).await?.first().copied()) } - /// Get the current tip blockchain height. async fn get_tip_height(&self) -> Option; async fn get_start_height(&self) -> Option; async fn get_stored_headers_len(&self) -> u32; - /// Get header height by block hash (reverse lookup). async fn get_header_height_by_hash( &self, hash: &dashcore::BlockHash, @@ -77,7 +71,7 @@ impl PersistentBlockHeaderStorage { #[async_trait] impl PersistentStorage for PersistentBlockHeaderStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let segments_folder = storage_path.join(Self::FOLDER_NAME); @@ -119,18 +113,6 @@ impl PersistentStorage for PersistentBlockHeaderStorage { atomic_write(&index_path, &data).await } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&block_headers_folder).await?; - - self.block_headers.write().await.persist_evicted(&block_headers_folder).await; - Ok(()) - } } #[async_trait] @@ -189,7 +171,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { end_height - start_height + 1 } - /// Get header height by block hash (reverse lookup). 
async fn get_header_height_by_hash( &self, hash: &dashcore::BlockHash, diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs index 23b1aaec..c6c3b69a 100644 --- a/dash-spv/src/storage/chainstate.rs +++ b/dash-spv/src/storage/chainstate.rs @@ -26,7 +26,7 @@ impl PersistentChainStateStorage { #[async_trait] impl PersistentStorage for PersistentChainStateStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentChainStateStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 15a1473d..0e491680 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -11,13 +11,10 @@ use crate::{ #[async_trait] pub trait FilterHeaderStorage { - /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; - /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult>; - /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { if let Some(tip_height) = self.get_filter_tip_height().await? { if height > tip_height { @@ -38,7 +35,6 @@ pub trait FilterHeaderStorage { Ok(self.load_filter_headers(height..height + 1).await?.first().copied()) } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; async fn get_filter_start_height(&self) -> Option; @@ -46,10 +42,8 @@ pub trait FilterHeaderStorage { #[async_trait] pub trait FilterStorage { - /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; - /// Load compact filters in the given blockchain height range. 
async fn load_filters(&self, range: Range) -> StorageResult>>; } @@ -63,7 +57,7 @@ impl PersistentFilterHeaderStorage { #[async_trait] impl PersistentStorage for PersistentFilterHeaderStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let segments_folder = storage_path.join(Self::FOLDER_NAME); @@ -82,18 +76,6 @@ impl PersistentStorage for PersistentFilterHeaderStorage { self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let filter_headers_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&filter_headers_folder).await?; - - self.filter_headers.write().await.persist_evicted(&filter_headers_folder).await; - Ok(()) - } } #[async_trait] @@ -125,7 +107,7 @@ impl PersistentFilterStorage { #[async_trait] impl PersistentStorage for PersistentFilterStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let filters_folder = storage_path.join(Self::FOLDER_NAME); @@ -145,18 +127,6 @@ impl PersistentStorage for PersistentFilterStorage { self.filters.write().await.persist(&filters_folder).await; Ok(()) } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let filters_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&filters_folder).await?; - - self.filters.write().await.persist_evicted(&filters_folder).await; - Ok(()) - } } #[async_trait] diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs index 254b26d1..d7ec1dd9 100644 --- a/dash-spv/src/storage/masternode.rs +++ b/dash-spv/src/storage/masternode.rs @@ -25,7 +25,7 @@ impl PersistentMasternodeStateStorage { #[async_trait] impl PersistentStorage for PersistentMasternodeStateStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentMasternodeStateStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs index 5ec51712..7707e41a 100644 --- a/dash-spv/src/storage/metadata.rs +++ b/dash-spv/src/storage/metadata.rs @@ -24,7 +24,7 @@ impl PersistentMetadataStorage { #[async_trait] impl PersistentStorage for PersistentMetadataStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentMetadataStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index aa471717..ecb42d4e 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -46,34 +46,35 @@ pub use types::*; #[async_trait] pub trait PersistentStorage: Sized { - async fn load(storage_path: impl Into + Send) -> StorageResult; - async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; + /// If the storage_path contains persisted data, the storage will use it; + /// if not, an empty storage will be created.
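
The doc comment above pins down the open half of the PersistentStorage contract; persist, declared next, is its counterpart, and together they make reopening a storage path lossless once dirty data has been flushed. A sketch of that round trip at the manager level, with an illustrative path:

    use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager};

    async fn reopen_demo() -> Result<(), Box<dyn std::error::Error>> {
        let path = "./.tmp/reopen-demo";
        {
            let mut storage = DiskStorageManager::new(path).await?;
            // ... store headers, filters, chain state ...
            storage.shutdown().await; // stops the worker and persists dirty data
        } // lock file is released when the manager is dropped
        let storage = DiskStorageManager::new(path).await?;
        let _tip = storage.get_tip_height().await; // reflects the persisted headers
        Ok(())
    }
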
+ async fn open(storage_path: impl Into + Send) -> StorageResult; - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - self.persist(storage_path).await - } + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; } #[async_trait] pub trait StorageManager: - blocks::BlockHeaderStorage - + filters::FilterHeaderStorage - + filters::FilterStorage - + transactions::TransactionStorage - + metadata::MetadataStorage - + chainstate::ChainStateStorage - + masternode::MasternodeStateStorage + BlockHeaderStorage + + FilterHeaderStorage + + FilterStorage + + TransactionStorage + + MetadataStorage + + ChainStateStorage + + MasternodeStateStorage + Send + Sync { + /// Deletes on-disk and in-memory data. async fn clear(&mut self) -> StorageResult<()>; + + /// Stops all background tasks and persists the data. async fn shutdown(&mut self); } /// Disk-based storage manager with segmented files and async background saving. +/// Only one instance of DiskStorageManager working on the same storage path +/// can exist at a time. pub struct DiskStorageManager { storage_path: PathBuf, @@ -88,7 +89,6 @@ pub struct DiskStorageManager { // Background worker worker_handle: Option>, - // Lock file to prevent concurrent access from multiple processes. _lock_file: LockFile, } @@ -103,31 +103,29 @@ impl DiskStorageManager { lock_file }; - // Create directories if they don't exist fs::create_dir_all(&storage_path)?; - // Acquire exclusive lock on the data directory let lock_file = LockFile::new(lock_file)?; let mut storage = Self { storage_path: storage_path.clone(), block_headers: Arc::new(RwLock::new( - PersistentBlockHeaderStorage::load(&storage_path).await?, + PersistentBlockHeaderStorage::open(&storage_path).await?, )), filter_headers: Arc::new(RwLock::new( - PersistentFilterHeaderStorage::load(&storage_path).await?, + PersistentFilterHeaderStorage::open(&storage_path).await?, )), - filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&storage_path).await?)), + filters: Arc::new(RwLock::new(PersistentFilterStorage::open(&storage_path).await?)), transactions: Arc::new(RwLock::new( - PersistentTransactionStorage::load(&storage_path).await?, + PersistentTransactionStorage::open(&storage_path).await?, )), - metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&storage_path).await?)), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::open(&storage_path).await?)), chainstate: Arc::new(RwLock::new( - PersistentChainStateStorage::load(&storage_path).await?, + PersistentChainStateStorage::open(&storage_path).await?, )), masternodestate: Arc::new(RwLock::new( - PersistentMasternodeStateStorage::load(&storage_path).await?, + PersistentMasternodeStateStorage::open(&storage_path).await?, )), worker_handle: None, @@ -135,8 +133,6 @@ impl DiskStorageManager { _lock_file: lock_file, }; - // Start background worker that - // persists data when appropriate storage.start_worker().await; Ok(storage) @@ -150,8 +146,8 @@ impl DiskStorageManager { Self::new(temp_dir.path()).await } - /// Start the background worker - pub(super) async fn start_worker(&mut self) { + /// Start the background worker, which saves data every 5 seconds. + async fn start_worker(&mut self) { let block_headers = Arc::clone(&self.block_headers); let filter_headers = Arc::clone(&self.filter_headers); let filters = Arc::clone(&self.filters); @@ -167,12 +163,12 @@ impl DiskStorageManager { loop { ticker.tick().await; - let _ =
block_headers.write().await.persist_dirty(&storage_path).await; - let _ = filter_headers.write().await.persist_dirty(&storage_path).await; - let _ = filters.write().await.persist_dirty(&storage_path).await; - let _ = transactions.write().await.persist_dirty(&storage_path).await; - let _ = metadata.write().await.persist_dirty(&storage_path).await; - let _ = chainstate.write().await.persist_dirty(&storage_path).await; + let _ = block_headers.write().await.persist(&storage_path).await; + let _ = filter_headers.write().await.persist(&storage_path).await; + let _ = filters.write().await.persist(&storage_path).await; + let _ = transactions.write().await.persist(&storage_path).await; + let _ = metadata.write().await.persist(&storage_path).await; + let _ = chainstate.write().await.persist(&storage_path).await; } }); @@ -180,8 +176,8 @@ impl DiskStorageManager { } /// Stop the background worker without forcing a save. - pub(super) fn stop_worker(&mut self) { - if let Some(handle) = self.worker_handle.take() { + fn stop_worker(&self) { + if let Some(handle) = &self.worker_handle { handle.abort(); } } @@ -204,22 +200,19 @@ impl StorageManager for DiskStorageManager { // First, stop the background worker to avoid races with file deletion self.stop_worker(); - // Remove all files and directories under base_path + // Remove all files and directories under storage_path if self.storage_path.exists() { // Best-effort removal; if concurrent files appear, retry once match tokio::fs::remove_dir_all(&self.storage_path).await { Ok(_) => {} - Err(e) => { - // Retry once after a short delay to handle transient races + Err(e) if e.kind() == std::io::ErrorKind::Other - || e.kind() == std::io::ErrorKind::DirectoryNotEmpty - { - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.storage_path).await?; - } else { - return Err(crate::error::StorageError::Io(e)); - } + || e.kind() == std::io::ErrorKind::DirectoryNotEmpty => + { + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + tokio::fs::remove_dir_all(&self.storage_path).await?; } + Err(e) => return Err(crate::error::StorageError::Io(e)), } tokio::fs::create_dir_all(&self.storage_path).await?; } @@ -228,15 +221,15 @@ impl StorageManager for DiskStorageManager { let storage_path = &self.storage_path; self.block_headers = - Arc::new(RwLock::new(PersistentBlockHeaderStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentBlockHeaderStorage::open(storage_path).await?)); self.filter_headers = - Arc::new(RwLock::new(PersistentFilterHeaderStorage::load(storage_path).await?)); - self.filters = Arc::new(RwLock::new(PersistentFilterStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentFilterHeaderStorage::open(storage_path).await?)); + self.filters = Arc::new(RwLock::new(PersistentFilterStorage::open(storage_path).await?)); self.transactions = - Arc::new(RwLock::new(PersistentTransactionStorage::load(storage_path).await?)); - self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentTransactionStorage::open(storage_path).await?)); + self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::open(storage_path).await?)); self.chainstate = - Arc::new(RwLock::new(PersistentChainStateStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentChainStateStorage::open(storage_path).await?)); // Restart the background worker for future operations self.start_worker().await; @@ -244,15 +237,10 @@ impl 
StorageManager for DiskStorageManager {
     Ok(())
 }
 
-<<<<<<< HEAD
     /// Shutdown the storage manager.
     pub async fn shutdown(&mut self) {
-=======
-    async fn shutdown(&mut self) {
->>>>>>> f40a2bd9 (storage manager trait implemented)
         self.stop_worker();
 
-        // Persist all dirty data
         self.persist().await;
     }
 }
@@ -287,7 +275,6 @@ impl blocks::BlockHeaderStorage for DiskStorageManager {
         self.block_headers.read().await.get_stored_headers_len().await
     }
 
-    /// Get header height by block hash (reverse lookup).
     async fn get_header_height_by_hash(
         &self,
         hash: &dashcore::BlockHash,
diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs
index 0cc65e25..e4cf4875 100644
--- a/dash-spv/src/storage/segments.rs
+++ b/dash-spv/src/storage/segments.rs
@@ -162,7 +162,6 @@ impl SegmentCache {
         Ok(cache)
     }
 
-    /// Get the segment ID for a given storage index.
     #[inline]
     fn height_to_segment_id(height: u32) -> u32 {
         height / Segment::::ITEMS_PER_SEGMENT
@@ -319,6 +318,8 @@ impl SegmentCache {
             height += 1;
         }
 
+        // Update cached tip height and start height
+        // if needed
         self.tip_height = match self.tip_height {
             Some(current) => Some(current.max(height - 1)),
             None => Some(height - 1),
@@ -332,25 +333,20 @@ impl SegmentCache {
         Ok(())
     }
 
-    pub async fn persist_evicted(&mut self, segments_dir: impl Into<PathBuf>) {
+    pub async fn persist(&mut self, segments_dir: impl Into<PathBuf>) {
         let segments_dir = segments_dir.into();
-        for (_, segments) in self.evicted.iter_mut() {
+
+        for (id, segments) in self.evicted.iter_mut() {
             if let Err(e) = segments.persist(&segments_dir).await {
-                tracing::error!("Failed to persist segment: {}", e);
+                tracing::error!("Failed to persist segment with id {id}: {e}");
             }
         }
 
         self.evicted.clear();
-    }
-
-    pub async fn persist(&mut self, segments_dir: impl Into<PathBuf>) {
-        let segments_dir = segments_dir.into();
-
-        self.persist_evicted(&segments_dir).await;
 
-        for (_, segments) in self.segments.iter_mut() {
+        for (id, segments) in self.segments.iter_mut() {
             if let Err(e) = segments.persist(&segments_dir).await {
-                tracing::error!("Failed to persist segment: {}", e);
+                tracing::error!("Failed to persist segment with id {id}: {e}");
             }
         }
     }
diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs
index 67baaf4b..480273c4 100644
--- a/dash-spv/src/storage/transactions.rs
+++ b/dash-spv/src/storage/transactions.rs
@@ -40,7 +40,7 @@ pub struct PersistentTransactionStorage {
 
 #[async_trait]
 impl PersistentStorage for PersistentTransactionStorage {
-    async fn load(_storage_path: impl Into<PathBuf> + Send) -> StorageResult<Self> {
+    async fn open(_storage_path: impl Into<PathBuf> + Send) -> StorageResult<Self> {
         let mempool_transactions = HashMap::new();
         let mempool_state = None;

From 02482570708edcd947e7635d0f783cefce68ec1a Mon Sep 17 00:00:00 2001
From: Borja Castellano 
Date: Mon, 5 Jan 2026 16:13:20 +0000
Subject: [PATCH 19/21] rebase conflicts resolved

---
 dash-spv/benches/storage.rs                  |   2 +-
 dash-spv/src/lib.rs                          |   2 +-
 dash-spv/src/storage/mod.rs                  |   3 +-
 dash-spv/src/storage/segments.rs             |  18 +-
 dash-spv/src/storage/state.rs                | 344 ------------------
 dash-spv/src/sync/headers/manager.rs         |   5 +-
 dash-spv/tests/edge_case_filter_sync_test.rs |   2 +-
 .../tests/filter_header_verification_test.rs |   2 +-
 dash-spv/tests/rollback_test.rs              |   2 +-
 dash-spv/tests/storage_test.rs               |   7 -
 10 files changed, 10 insertions(+), 377 deletions(-)
 delete mode 100644 dash-spv/src/storage/state.rs

diff --git a/dash-spv/benches/storage.rs b/dash-spv/benches/storage.rs
index 52f98cd2..5677e6dd 100644
--- a/dash-spv/benches/storage.rs
+++ 
b/dash-spv/benches/storage.rs
@@ -2,7 +2,7 @@ use std::time::Duration;
 
 use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
 use dash_spv::{
-    storage::{DiskStorageManager, StorageManager},
+    storage::{BlockHeaderStorage, DiskStorageManager, StorageManager},
     Hash,
 };
 use dashcore::{block::Version, BlockHash, CompactTarget, Header};
diff --git a/dash-spv/src/lib.rs b/dash-spv/src/lib.rs
index 29180781..eab41469 100644
--- a/dash-spv/src/lib.rs
+++ b/dash-spv/src/lib.rs
@@ -31,7 +31,7 @@
 //! // Create the required components
 //! let network = PeerNetworkManager::new(&config).await?;
 //! let storage = DiskStorageManager::new("./.tmp/example-storage").await?;
-//! let wallet = Arc::new(RwLock::new(WalletManager::::new()));
+//! let wallet = Arc::new(RwLock::new(WalletManager::::new(config.network)));
 //!
 //! // Create and start the client
 //! let mut client = DashSpvClient::new(config.clone(), network, storage, wallet).await?;
diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs
index ecb42d4e..ddc45b6e 100644
--- a/dash-spv/src/storage/mod.rs
+++ b/dash-spv/src/storage/mod.rs
@@ -64,6 +64,7 @@ pub trait StorageManager:
     + MasternodeStateStorage
     + Send
     + Sync
+    + 'static
 {
     /// Deletes on-disk and in-memory data.
     async fn clear(&mut self) -> StorageResult<()>;
@@ -238,7 +239,7 @@ impl StorageManager for DiskStorageManager {
     }
 
     /// Shutdown the storage manager.
-    pub async fn shutdown(&mut self) {
+    async fn shutdown(&mut self) {
         self.stop_worker();
 
         self.persist().await;
diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs
index e4cf4875..9401ab72 100644
--- a/dash-spv/src/storage/segments.rs
+++ b/dash-spv/src/storage/segments.rs
@@ -593,7 +593,7 @@ mod tests {
             .await
             .expect("Failed to create new segment_cache");
 
-        cache.store_items(&items).await.expect("Failed to store items");
+        cache.store_items_at_height(&items, 10).await.expect("Failed to store items");
 
         cache.persist(tmp_dir.path()).await;
 
@@ -601,26 +601,10 @@
             .await
             .expect("Failed to load new segment_cache");
 
-<<<<<<< HEAD
-        let recovered_items = cache.get_items(0..10).await.expect("Failed to load items");
-
-        assert_eq!(recovered_items, items);
-        assert_eq!(cache.segments.len(), 1);
-
-        cache.clear_all().await.expect("Failed to clean on-memory and on-disk data");
-        assert!(cache.segments.is_empty());
-
-        let segment = cache.get_segment(&0).await.expect("Failed to create a new segment");
-
-        assert!(segment.first_valid_offset().is_none());
-        assert!(segment.last_valid_offset().is_none());
-        assert_eq!(segment.state, SegmentState::Dirty);
-=======
         assert_eq!(
             cache.get_items(10..20).await.expect("Failed to get items from segment cache"),
             items
         );
->>>>>>> f40a2bd9 (storage manager trait implemented)
     }
 
     #[tokio::test]
diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs
deleted file mode 100644
index 92229536..00000000
--- a/dash-spv/src/storage/state.rs
+++ /dev/null
@@ -1,344 +0,0 @@
-//! State persistence and StorageManager trait implementation.
-
-use async_trait::async_trait;
-use std::collections::HashMap;
-
-use dashcore::{block::Header as BlockHeader, BlockHash, Txid};
-
-use crate::error::StorageResult;
-use crate::storage::headers::save_index_to_disk;
-use crate::storage::{MasternodeState, StorageManager};
-use crate::types::{ChainState, MempoolState, UnconfirmedTransaction};
-
-use super::io::atomic_write;
-use super::manager::DiskStorageManager;
-
-impl DiskStorageManager {
-    /// Store chain state to disk.
- pub async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - // Store other state as JSON - let state_data = serde_json::json!({ - "last_chainlock_height": state.last_chainlock_height, - "last_chainlock_hash": state.last_chainlock_hash, - "current_filter_tip": state.current_filter_tip, - "last_masternode_diff_height": state.last_masternode_diff_height, - "sync_base_height": state.sync_base_height, - }); - - let path = self.base_path.join("state/chain.json"); - let json = state_data.to_string(); - atomic_write(&path, json.as_bytes()).await?; - - Ok(()) - } - - /// Load chain state from disk. - pub async fn load_chain_state(&self) -> StorageResult> { - let path = self.base_path.join("state/chain.json"); - if !path.exists() { - return Ok(None); - } - - let content = tokio::fs::read_to_string(path).await?; - let value: serde_json::Value = serde_json::from_str(&content).map_err(|e| { - crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e)) - })?; - - let state = ChainState { - last_chainlock_height: value - .get("last_chainlock_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32), - last_chainlock_hash: value - .get("last_chainlock_hash") - .and_then(|v| v.as_str()) - .and_then(|s| s.parse().ok()), - current_filter_tip: value - .get("current_filter_tip") - .and_then(|v| v.as_str()) - .and_then(|s| s.parse().ok()), - masternode_engine: None, - last_masternode_diff_height: value - .get("last_masternode_diff_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32), - sync_base_height: value - .get("sync_base_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32) - .unwrap_or(0), - }; - - Ok(Some(state)) - } - - /// Store masternode state. - pub async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { - let path = self.base_path.join("state/masternode.json"); - let json = serde_json::to_string_pretty(state).map_err(|e| { - crate::error::StorageError::Serialization(format!( - "Failed to serialize masternode state: {}", - e - )) - })?; - - atomic_write(&path, json.as_bytes()).await?; - Ok(()) - } - - /// Load masternode state. - pub async fn load_masternode_state(&self) -> StorageResult> { - let path = self.base_path.join("state/masternode.json"); - if !path.exists() { - return Ok(None); - } - - let content = tokio::fs::read_to_string(path).await?; - let state = serde_json::from_str(&content).map_err(|e| { - crate::error::StorageError::Serialization(format!( - "Failed to deserialize masternode state: {}", - e - )) - })?; - - Ok(Some(state)) - } - - /// Store metadata. - pub async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { - let path = self.base_path.join(format!("state/{}.dat", key)); - atomic_write(&path, value).await?; - Ok(()) - } - - /// Load metadata. - pub async fn load_metadata(&self, key: &str) -> StorageResult>> { - let path = self.base_path.join(format!("state/{}.dat", key)); - if !path.exists() { - return Ok(None); - } - - let data = tokio::fs::read(path).await?; - Ok(Some(data)) - } -} - -/// Mempool storage methods -impl DiskStorageManager { - /// Store a mempool transaction. - pub async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()> { - self.mempool_transactions.write().await.insert(*txid, tx.clone()); - Ok(()) - } - - /// Remove a mempool transaction. 
- pub async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - self.mempool_transactions.write().await.remove(txid); - Ok(()) - } - - /// Get a mempool transaction. - pub async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.get(txid).cloned()) - } - - /// Get all mempool transactions. - pub async fn get_all_mempool_transactions( - &self, - ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.clone()) - } - - /// Store mempool state. - pub async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - *self.mempool_state.write().await = Some(state.clone()); - Ok(()) - } - - /// Load mempool state. - pub async fn load_mempool_state(&self) -> StorageResult> { - Ok(self.mempool_state.read().await.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use dashcore::{block::Version, pow::CompactTarget}; - use dashcore_hashes::Hash; - use tempfile::TempDir; - - fn build_headers(count: usize) -> Vec { - let mut headers = Vec::with_capacity(count); - let mut prev_hash = BlockHash::from_byte_array([0u8; 32]); - - for i in 0..count { - let header = BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: prev_hash, - merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array( - [(i % 255) as u8; 32], - ) - .into(), - time: 1 + i as u32, - bits: CompactTarget::from_consensus(0x1d00ffff), - nonce: i as u32, - }; - prev_hash = header.block_hash(); - headers.push(header); - } - - headers - } - - #[tokio::test] - async fn test_load_headers() -> Result<(), Box> { - // Create a temporary directory for the test - let temp_dir = TempDir::new()?; - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Unable to create storage"); - - // Create a test header - let test_header = BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: BlockHash::from_byte_array([1; 32]), - merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array([2; 32]).into(), - time: 12345, - bits: CompactTarget::from_consensus(0x1d00ffff), - nonce: 67890, - }; - - // Store just one header - storage.store_headers(&[test_header]).await?; - - let loaded_headers = storage.load_headers(0..1).await?; - - // Should only get back the one header we stored - assert_eq!(loaded_headers.len(), 1); - assert_eq!(loaded_headers[0], test_header); - - Ok(()) - } - - #[tokio::test] - async fn test_checkpoint_storage_indexing() -> StorageResult<()> { - use dashcore::TxMerkleNode; - use tempfile::tempdir; - - let temp_dir = tempdir().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; - - // Create test headers starting from checkpoint height - let checkpoint_height = 1_100_000; - let headers: Vec = (0..100) - .map(|i| BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: BlockHash::from_byte_array([i as u8; 32]), - merkle_root: TxMerkleNode::from_byte_array([(i + 1) as u8; 32]), - time: 1234567890 + i, - bits: CompactTarget::from_consensus(0x1a2b3c4d), - nonce: 67890 + i, - }) - .collect(); - - let mut base_state = ChainState::new(); - base_state.sync_base_height = checkpoint_height; - storage.store_chain_state(&base_state).await?; - - storage.store_headers_at_height(&headers, checkpoint_height).await?; - assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); - - // Verify headers are stored at correct blockchain heights - let header_at_base = 
storage.get_header(checkpoint_height).await?; - assert_eq!( - header_at_base.expect("Header at base blockchain height should exist"), - headers[0] - ); - - let header_at_ending = storage.get_header(checkpoint_height + 99).await?; - assert_eq!( - header_at_ending.expect("Header at ending blockchain height should exist"), - headers[99] - ); - - // Test the reverse index (hash -> blockchain height) - let hash_0 = headers[0].block_hash(); - let height_0 = storage.get_header_height_by_hash(&hash_0).await?; - assert_eq!( - height_0, - Some(checkpoint_height), - "Hash should map to blockchain height 1,100,000" - ); - - let hash_99 = headers[99].block_hash(); - let height_99 = storage.get_header_height_by_hash(&hash_99).await?; - assert_eq!( - height_99, - Some(checkpoint_height + 99), - "Hash should map to blockchain height 1,100,099" - ); - - // Store chain state to persist sync_base_height - let mut chain_state = ChainState::new(); - chain_state.sync_base_height = checkpoint_height; - storage.store_chain_state(&chain_state).await?; - - // Force save to disk - storage.save_dirty().await; - - drop(storage); - - // Create a new storage instance to test index rebuilding - let storage2 = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; - - // Verify the index was rebuilt correctly - let height_after_rebuild = storage2.get_header_height_by_hash(&hash_0).await?; - assert_eq!( - height_after_rebuild, - Some(checkpoint_height), - "After index rebuild, hash should still map to blockchain height 1,100,000" - ); - - // Verify header can still be retrieved by blockchain height after reload - let header_after_reload = storage2.get_header(checkpoint_height).await?; - assert!( - header_after_reload.is_some(), - "Header at base blockchain height should exist after reload" - ); - assert_eq!(header_after_reload.unwrap(), headers[0]); - - Ok(()) - } - - #[tokio::test] - async fn test_shutdown_flushes_index() -> Result<(), Box> { - let temp_dir = TempDir::new()?; - let base_path = temp_dir.path().to_path_buf(); - let headers = build_headers(11_000); - let last_hash = headers.last().unwrap().block_hash(); - - { - let mut storage = DiskStorageManager::new(base_path.clone()).await?; - - storage.store_headers(&headers[..10_000]).await?; - storage.save_dirty().await; - - storage.store_headers(&headers[10_000..]).await?; - storage.shutdown().await; - } - - let storage = DiskStorageManager::new(base_path).await?; - let height = storage.get_header_height_by_hash(&last_hash).await?; - assert_eq!(height, Some(10_999)); - - Ok(()) - } -} diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index ef1b96e3..1faf92c3 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -49,6 +49,7 @@ pub struct HeaderSyncManager { config: ClientConfig, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, + reorg_config: ReorgConfig, chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, @@ -80,6 +81,7 @@ impl HeaderSyncManager { config: config.clone(), tip_manager: ChainTipManager::new(reorg_config.max_forks), checkpoint_manager, + reorg_config, chain_state, // WalletState removed headers2_state: Headers2StateManager::new(), @@ -212,7 +214,6 @@ impl HeaderSyncManager { // Step 3: Process the Entire Validated Batch -<<<<<<< HEAD // Checkpoint Validation: Perform in-memory security check against checkpoints for (index, cached_header) in 
cached_headers.iter().enumerate() { let prospective_height = tip_height + (index as u32) + 1; @@ -229,8 +230,6 @@ impl HeaderSyncManager { } } -======= ->>>>>>> 7acccc0b (removed two methos that where invovled in the same process) storage .store_headers(headers) .await diff --git a/dash-spv/tests/edge_case_filter_sync_test.rs b/dash-spv/tests/edge_case_filter_sync_test.rs index c5d4760b..f79b4052 100644 --- a/dash-spv/tests/edge_case_filter_sync_test.rs +++ b/dash-spv/tests/edge_case_filter_sync_test.rs @@ -16,7 +16,7 @@ use dash_spv::{ client::ClientConfig, error::NetworkResult, network::NetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage}, sync::filters::FilterSyncManager, }; use dashcore::{ diff --git a/dash-spv/tests/filter_header_verification_test.rs b/dash-spv/tests/filter_header_verification_test.rs index e8753411..0d4b6de4 100644 --- a/dash-spv/tests/filter_header_verification_test.rs +++ b/dash-spv/tests/filter_header_verification_test.rs @@ -19,7 +19,7 @@ use dash_spv::{ client::ClientConfig, error::{NetworkError, NetworkResult, SyncError}, network::NetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage}, sync::filters::FilterSyncManager, types::PeerInfo, }; diff --git a/dash-spv/tests/rollback_test.rs b/dash-spv/tests/rollback_test.rs index 7634648c..5a985e83 100644 --- a/dash-spv/tests/rollback_test.rs +++ b/dash-spv/tests/rollback_test.rs @@ -5,7 +5,7 @@ #![cfg(feature = "skip_mock_implementation_incomplete")] -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage}; use dashcore::{ block::{Header as BlockHeader, Version}, pow::CompactTarget, diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 0d988a40..79833d09 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -1,14 +1,7 @@ //! Integration tests for storage layer functionality. 
-<<<<<<< HEAD
-use dash_spv::{
-    storage::{DiskStorageManager, StorageManager},
-    StorageError,
-};
-=======
 use dash_spv::error::StorageError;
 use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager};
->>>>>>> f40a2bd9 (storage manager trait implemented)
 use dashcore::{block::Header as BlockHeader, block::Version};
 use dashcore_hashes::Hash;
 use tempfile::TempDir;

From 81e3dec4268cfa5be3b4d0cecf5570c503d5d182 Mon Sep 17 00:00:00 2001
From: Borja Castellano 
Date: Mon, 5 Jan 2026 17:25:50 +0000
Subject: [PATCH 20/21] masternode state storage was not being persisted
 following the pattern the other storages use

---
 dash-spv/src/storage/mod.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs
index ddc45b6e..a572cb65 100644
--- a/dash-spv/src/storage/mod.rs
+++ b/dash-spv/src/storage/mod.rs
@@ -155,6 +155,7 @@ impl DiskStorageManager {
         let transactions = Arc::clone(&self.transactions);
         let metadata = Arc::clone(&self.metadata);
         let chainstate = Arc::clone(&self.chainstate);
+        let masternodestate = Arc::clone(&self.masternodestate);
 
         let storage_path = self.storage_path.clone();
 
@@ -170,6 +171,7 @@
                 let _ = transactions.write().await.persist(&storage_path).await;
                 let _ = metadata.write().await.persist(&storage_path).await;
                 let _ = chainstate.write().await.persist(&storage_path).await;
+                let _ = masternodestate.write().await.persist(&storage_path).await;
             }
         });
 
@@ -192,6 +194,7 @@
         let _ = self.transactions.write().await.persist(storage_path).await;
         let _ = self.metadata.write().await.persist(storage_path).await;
         let _ = self.chainstate.write().await.persist(storage_path).await;
+        let _ = self.masternodestate.write().await.persist(storage_path).await;
     }
 }
 
@@ -231,6 +234,8 @@
         self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::open(storage_path).await?));
         self.chainstate =
             Arc::new(RwLock::new(PersistentChainStateStorage::open(storage_path).await?));
+        self.masternodestate =
+            Arc::new(RwLock::new(PersistentMasternodeStateStorage::open(storage_path).await?));
 
         // Restart the background worker for future operations
         self.start_worker().await;

From 79cf7a2acb065fdf2d1112f7469a873cbd841a2d Mon Sep 17 00:00:00 2001
From: Borja Castellano 
Date: Mon, 5 Jan 2026 18:36:56 +0000
Subject: [PATCH 21/21] replaced write() locks where a read() can be used

---
 dash-spv/src/storage/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs
index a572cb65..6c212acb 100644
--- a/dash-spv/src/storage/mod.rs
+++ b/dash-spv/src/storage/mod.rs
@@ -266,7 +266,7 @@ impl blocks::BlockHeaderStorage for DiskStorageManager {
     }
 
     async fn load_headers(&self, range: Range<u32>) -> StorageResult<Vec<BlockHeader>> {
-        self.block_headers.write().await.load_headers(range).await
+        self.block_headers.read().await.load_headers(range).await
     }
 
     async fn get_tip_height(&self) -> Option<u32> {
@@ -296,7 +296,7 @@ impl filters::FilterHeaderStorage for DiskStorageManager {
     }
 
     async fn load_filter_headers(&self, range: Range<u32>) -> StorageResult<Vec<FilterHeader>> {
-        self.filter_headers.write().await.load_filter_headers(range).await
+        self.filter_headers.read().await.load_filter_headers(range).await
     }
 
     async fn get_filter_tip_height(&self) -> StorageResult<Option<u32>> {
@@ -315,7 +315,7 @@ impl filters::FilterStorage for DiskStorageManager {
     }
 
     async fn load_filters(&self, range: Range<u32>) -> StorageResult>> {
-        self.filters.write().await.load_filters(range).await
+        self.filters.read().await.load_filters(range).await
     }
 }
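Taken together, the series settles on a single storage pattern: each persistent store implements an open/persist trait, a background worker persists every store on a 5-second ticker, and accessors that never mutate state take a shared read() guard instead of the exclusive write() guard. The following is a minimal, self-contained sketch of that pattern, assuming tokio (rt, macros, time) and the async-trait crate; PersistentStore, HeaderStore, Manager, and load_header are illustrative stand-ins rather than the crate's actual types, and all disk I/O is stubbed out.

use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;

use async_trait::async_trait;
use tokio::sync::RwLock;
use tokio::task::JoinHandle;

/// Stand-in for the crate's PersistentStorage trait: open on startup,
/// persist on a timer or at shutdown.
#[async_trait]
trait PersistentStore: Sized {
    async fn open(storage_path: &Path) -> std::io::Result<Self>;
    async fn persist(&mut self, storage_path: &Path) -> std::io::Result<()>;
}

struct HeaderStore {
    headers: Vec<[u8; 80]>,
}

#[async_trait]
impl PersistentStore for HeaderStore {
    async fn open(_storage_path: &Path) -> std::io::Result<Self> {
        // Real code would read segment files from disk here.
        Ok(Self { headers: Vec::new() })
    }

    async fn persist(&mut self, _storage_path: &Path) -> std::io::Result<()> {
        // Real code would write dirty segments here, hence &mut self.
        Ok(())
    }
}

struct Manager {
    storage_path: PathBuf,
    headers: Arc<RwLock<HeaderStore>>,
    worker_handle: Option<JoinHandle<()>>,
}

impl Manager {
    async fn new(storage_path: PathBuf) -> std::io::Result<Self> {
        let headers = Arc::new(RwLock::new(HeaderStore::open(&storage_path).await?));
        let mut manager = Self { storage_path, headers, worker_handle: None };
        manager.start_worker();
        Ok(manager)
    }

    /// Background worker persisting data every 5 seconds. Persisting
    /// mutates dirty-tracking state, so it takes the exclusive write() guard.
    fn start_worker(&mut self) {
        let headers = Arc::clone(&self.headers);
        let storage_path = self.storage_path.clone();
        self.worker_handle = Some(tokio::spawn(async move {
            let mut ticker = tokio::time::interval(Duration::from_secs(5));
            loop {
                ticker.tick().await;
                let _ = headers.write().await.persist(&storage_path).await;
            }
        }));
    }

    /// Loads never mutate, so a shared read() guard suffices and
    /// concurrent readers are not serialized against each other.
    async fn load_header(&self, height: usize) -> Option<[u8; 80]> {
        self.headers.read().await.headers.get(height).copied()
    }

    /// Stop the worker, then flush once more before dropping the manager.
    async fn shutdown(&mut self) {
        if let Some(handle) = self.worker_handle.take() {
            handle.abort();
        }
        let _ = self.headers.write().await.persist(&self.storage_path).await;
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut manager = Manager::new(std::env::temp_dir().join("spv-sketch")).await?;
    manager.headers.write().await.headers.push([0u8; 80]);
    assert!(manager.load_header(0).await.is_some());
    manager.shutdown().await;
    Ok(())
}

The read()/write() split is the point of the final patch: under tokio's RwLock, a write() guard excludes all other access, so routing read-only loads through write() needlessly serializes concurrent callers.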