From 0b7058a3b5421442763dee4f8d5c0671e6e9e243 Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Thu, 5 Feb 2026 23:48:31 +0200
Subject: [PATCH 01/34] gnd(test): Migrate from monolithic test.rs to modular
 test/ structure

Replaces monolithic gnd/src/commands/test.rs with organized test/
directory containing:
- mod.rs: Main entry point and test orchestration
- runner.rs: Test execution and infrastructure setup
- assertion.rs: GraphQL assertion logic
- block_stream.rs: Mock block stream implementation
- noop.rs: Stub trait implementations
- schema.rs: JSON schema and test types
- trigger.rs: ABI encoding for test triggers
- output.rs: Test result formatting
- mock_chain.rs: Block pointer helpers

Updates main.rs to make Test command async (.await).
Adds dependencies for test runner (graph-graphql, graph-store-postgres).
---
 gnd/Cargo.toml           |   6 ++
 gnd/src/commands/test.rs | 261 ---------------------------------------
 gnd/src/main.rs          |   2 +-
 3 files changed, 7 insertions(+), 262 deletions(-)
 delete mode 100644 gnd/src/commands/test.rs

diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml
index 580e51a7f3f..fd60a5ec638 100644
--- a/gnd/Cargo.toml
+++ b/gnd/Cargo.toml
@@ -21,6 +21,12 @@ graph = { path = "../graph" }
 graph-chain-ethereum = { path = "../chain/ethereum" }
 graph-core = { path = "../core" }
 graph-node = { path = "../node" }
+graph-graphql = { path = "../graphql" }
+graph-store-postgres = { path = "../store/postgres" }
+
+# Test command dependencies
+hex = "0.4"
+async-trait = { workspace = true }
 
 # Direct dependencies from current dev.rs
 anyhow = { workspace = true }
diff --git a/gnd/src/commands/test.rs b/gnd/src/commands/test.rs
deleted file mode 100644
index 692db1abb0e..00000000000
--- a/gnd/src/commands/test.rs
+++ /dev/null
@@ -1,261 +0,0 @@
-//! Test command for running Matchstick tests.
-//!
-//! This command runs the Matchstick test runner for subgraph unit tests.
-//! Matchstick is a testing framework for subgraphs that allows testing
-//! event handlers, entity storage, and contract calls.
-
-use std::path::PathBuf;
-use std::process::Command;
-
-use anyhow::{anyhow, Context, Result};
-use clap::Parser;
-
-use crate::output::{step, Step};
-
-#[derive(Clone, Debug, Parser)]
-#[clap(about = "Run Matchstick tests for the subgraph")]
-pub struct TestOpt {
-    /// Specific data source to test (optional, tests all if not specified)
-    #[clap()]
-    pub datasource: Option<String>,
-
-    /// Run tests with coverage reporting
-    #[clap(short = 'c', long)]
-    pub coverage: bool,
-
-    /// Run tests in a Docker container
-    #[clap(short = 'd', long)]
-    pub docker: bool,
-
-    /// Force redownload of Matchstick binary / rebuild Docker image
-    #[clap(short = 'f', long)]
-    pub force: bool,
-
-    /// Show debug logs (OS info, download URLs)
-    #[clap(short = 'l', long)]
-    pub logs: bool,
-
-    /// Force recompilation of tests
-    #[clap(short = 'r', long)]
-    pub recompile: bool,
-
-    /// Matchstick version to use
-    #[clap(short = 'v', long)]
-    pub version: Option<String>,
-}
-
-/// Run the test command.
-pub fn run_test(opt: TestOpt) -> Result<()> {
-    // Check if Matchstick binary exists in node_modules or PATH
-    let matchstick_path = find_matchstick()?;
-
-    if opt.docker {
-        run_docker_tests(&opt)
-    } else {
-        run_binary_tests(&matchstick_path, &opt)
-    }
-}
-
-/// Check if a binary with the given name exists in any PATH directory.
-fn is_in_path(name: &str) -> bool {
-    let Some(path_var) = std::env::var_os("PATH") else {
-        return false;
-    };
-    std::env::split_paths(&path_var).any(|dir| dir.join(name).is_file())
-}
-
-/// Find the Matchstick binary.
-fn find_matchstick() -> Result<PathBuf> {
-    // First, check node_modules/.bin/graph-test (graph-cli's matchstick wrapper)
-    let node_modules_path = PathBuf::from("node_modules/.bin/graph-test");
-    if node_modules_path.exists() {
-        return Ok(node_modules_path);
-    }
-
-    // Check for matchstick directly in node_modules
-    let matchstick_path = PathBuf::from("node_modules/.bin/matchstick");
-    if matchstick_path.exists() {
-        return Ok(matchstick_path);
-    }
-
-    // Check if matchstick is in PATH
-    if is_in_path("matchstick") {
-        return Ok(PathBuf::from("matchstick"));
-    }
-
-    Err(anyhow!(
-        "Matchstick not found. Please install it with:\n  \
-         npm install --save-dev matchstick-as\n\n\
-         Or use Docker mode:\n  \
-         gnd test -d"
-    ))
-}
-
-/// Run tests using the Matchstick binary.
-fn run_binary_tests(matchstick_path: &PathBuf, opt: &TestOpt) -> Result<()> {
-    step(Step::Generate, "Running Matchstick tests");
-
-    let mut cmd = Command::new(matchstick_path);
-
-    // Add flags
-    if opt.coverage {
-        cmd.arg("-c");
-    }
-    if opt.recompile {
-        cmd.arg("-r");
-    }
-
-    // Add datasource filter if specified
-    if let Some(datasource) = &opt.datasource {
-        cmd.arg(datasource);
-    }
-
-    let status = cmd.status().context("Failed to run Matchstick")?;
-
-    if status.success() {
-        step(Step::Done, "Tests passed");
-        Ok(())
-    } else {
-        Err(anyhow!("Tests failed"))
-    }
-}
-
-/// Run tests using Docker.
-fn run_docker_tests(opt: &TestOpt) -> Result<()> {
-    step(Step::Generate, "Running Matchstick tests in Docker");
-
-    // Check if Docker is available
-    Command::new("docker")
-        .arg("--version")
-        .output()
-        .context("Docker not found. 
Please install Docker to use -d/--docker mode.")?; - - // Build test arguments - let mut test_args = String::new(); - if opt.coverage { - test_args.push_str(" -c"); - } - if opt.recompile { - test_args.push_str(" -r"); - } - if let Some(datasource) = &opt.datasource { - test_args.push_str(&format!(" {}", datasource)); - } - - // Get current working directory - let cwd = std::env::current_dir().context("Failed to get current directory")?; - - // Build docker run command - let mut cmd = Command::new("docker"); - cmd.args([ - "run", - "-it", - "--rm", - "--mount", - &format!("type=bind,source={},target=/matchstick", cwd.display()), - ]); - - if !test_args.is_empty() { - cmd.args(["-e", &format!("ARGS={}", test_args.trim())]); - } - - cmd.arg("matchstick"); - - // Check if matchstick image exists - let image_check = Command::new("docker") - .args(["images", "-q", "matchstick"]) - .output() - .context("Failed to check for Docker image")?; - - let image_exists = !image_check.stdout.is_empty(); - - if !image_exists || opt.force { - // Need to build the image first - step(Step::Generate, "Building Matchstick Docker image"); - - // Create Dockerfile if it doesn't exist - let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); - if !dockerfile_path.exists() || opt.force { - create_dockerfile(&dockerfile_path, opt.version.as_deref())?; - } - - let build_status = Command::new("docker") - .args([ - "build", - "-f", - &dockerfile_path.to_string_lossy(), - "-t", - "matchstick", - ".", - ]) - .status() - .context("Failed to build Docker image")?; - - if !build_status.success() { - return Err(anyhow!("Failed to build Matchstick Docker image")); - } - } - - // Run the container - let status = cmd.status().context("Failed to run Docker container")?; - - if status.success() { - step(Step::Done, "Tests passed"); - Ok(()) - } else { - Err(anyhow!("Tests failed")) - } -} - -/// Create the Dockerfile for Matchstick. 
-fn create_dockerfile(path: &PathBuf, version: Option<&str>) -> Result<()> { - use std::fs; - - // Create directory if needed - if let Some(parent) = path.parent() { - fs::create_dir_all(parent)?; - } - - let version = version.unwrap_or("0.6.0"); - - let dockerfile_content = format!( - r#"FROM node:18-slim - -# Install build dependencies -RUN apt-get update && apt-get install -y \ - build-essential \ - git \ - && rm -rf /var/lib/apt/lists/* - -# Install matchstick -RUN npm install -g matchstick-as@{version} - -WORKDIR /matchstick - -# Entry point runs tests -ENTRYPOINT ["sh", "-c", "npm install && graph test $ARGS"] -"#, - version = version - ); - - fs::write(path, dockerfile_content) - .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; - - step(Step::Write, &format!("Created {}", path.display())); - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_find_matchstick_not_found() { - // In test environment, matchstick likely isn't installed - // This should return an error with helpful message - let result = find_matchstick(); - // Either finds it or returns error - both are valid - assert!(result.is_ok() || result.is_err()); - } -} diff --git a/gnd/src/main.rs b/gnd/src/main.rs index 24384bb0158..351dd9d220a 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -177,7 +177,7 @@ async fn main() -> Result<()> { Commands::Remove(remove_opt) => run_remove(remove_opt).await, Commands::Auth(auth_opt) => run_auth(auth_opt), Commands::Publish(publish_opt) => run_publish(publish_opt).await, - Commands::Test(test_opt) => run_test(test_opt), + Commands::Test(test_opt) => run_test(test_opt).await, Commands::Clean(clean_opt) => run_clean(clean_opt), Commands::Completions(completions_opt) => generate_completions(completions_opt), }; From da09875a3d6c4e6b52ca6adf4b2e492f62d41308 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 5 Feb 2026 23:48:11 +0200 Subject: [PATCH 02/34] gnd(test): Add remaining test module files Adds supporting modules for test infrastructure: - mock_chain: Helpers for block pointer construction - schema: JSON schema types and parsing - output: Console output formatting - trigger: ABI encoding of test triggers --- gnd/src/commands/test/mock_chain.rs | 30 ++ gnd/src/commands/test/output.rs | 66 +++++ gnd/src/commands/test/schema.rs | 221 +++++++++++++++ gnd/src/commands/test/trigger.rs | 412 ++++++++++++++++++++++++++++ 4 files changed, 729 insertions(+) create mode 100644 gnd/src/commands/test/mock_chain.rs create mode 100644 gnd/src/commands/test/output.rs create mode 100644 gnd/src/commands/test/schema.rs create mode 100644 gnd/src/commands/test/trigger.rs diff --git a/gnd/src/commands/test/mock_chain.rs b/gnd/src/commands/test/mock_chain.rs new file mode 100644 index 00000000000..d7298fdd861 --- /dev/null +++ b/gnd/src/commands/test/mock_chain.rs @@ -0,0 +1,30 @@ +//! Mock blockchain helpers for test block streams. +//! +//! Provides utility functions for working with mock block pointers. +//! The actual block stream infrastructure (StaticStream, StaticStreamBuilder) +//! lives in `runner.rs` since it's tightly coupled to the test execution flow. + +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::prelude::alloy::primitives::B256; +use graph::prelude::BlockPtr; +use graph_chain_ethereum::Chain; + +/// Get the final block pointer from a list of mock blocks. 
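+/// For blocks numbered 1..=N this is block N's pointer.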
+///
+/// Used as the "stop block" target — the indexer will process blocks
+/// until it reaches this pointer, at which point we know all test
+/// data has been indexed and we can run assertions.
+pub fn final_block_ptr(blocks: &[BlockWithTriggers<Chain>]) -> Option<BlockPtr> {
+    blocks.last().map(|b| b.ptr())
+}
+
+/// Get a genesis block pointer (block 0 with zero hash).
+///
+/// Used as a fallback stop block for test files with no blocks,
+/// so the indexer has a valid target to sync to.
+pub fn genesis_ptr() -> BlockPtr {
+    BlockPtr {
+        hash: B256::ZERO.into(),
+        number: 0,
+    }
+}
diff --git a/gnd/src/commands/test/output.rs b/gnd/src/commands/test/output.rs
new file mode 100644
index 00000000000..2596048702b
--- /dev/null
+++ b/gnd/src/commands/test/output.rs
@@ -0,0 +1,66 @@
+//! Console output formatting for test results.
+//!
+//! Formats test results with colored pass/fail indicators and detailed
+//! assertion failure diffs showing expected vs actual JSON values.
+
+use console::style;
+
+use super::schema::{AssertionFailure, TestResult};
+use crate::output::{step, Step};
+
+/// Print the header line when starting a test file.
+pub fn print_test_start(path: &std::path::Path) {
+    step(Step::Load, &format!("Running {}", path.display()));
+}
+
+/// Print the result of a single test case (pass or fail with details).
+pub fn print_test_result(name: &str, result: &TestResult) {
+    match result {
+        TestResult::Passed => {
+            println!("  {} {}", style("✔").green(), name);
+        }
+        TestResult::Failed {
+            handler_error,
+            assertion_failures,
+        } => {
+            println!("  {} {}", style("✘").red(), name);
+            if let Some(err) = handler_error {
+                println!("    {} {}", style("Handler error:").red(), err);
+            }
+            for failure in assertion_failures {
+                print_assertion_failure(failure);
+            }
+        }
+    }
+}
+
+/// Print a detailed assertion failure showing query, expected, and actual values.
+fn print_assertion_failure(failure: &AssertionFailure) {
+    println!("    {} {}", style("Query:").yellow(), failure.query);
+    println!(
+        "    {} {}",
+        style("Expected:").green(),
+        serde_json::to_string_pretty(&failure.expected).unwrap_or_default()
+    );
+    println!(
+        "    {} {}",
+        style("Actual:").red(),
+        serde_json::to_string_pretty(&failure.actual).unwrap_or_default()
+    );
+}
+
+/// Print the final summary line with total pass/fail counts.
+pub fn print_summary(passed: usize, failed: usize) {
+    println!();
+    if failed == 0 {
+        println!(
+            "{}",
+            style(format!("Tests: {} passed, {} failed", passed, failed)).green()
+        );
+    } else {
+        println!(
+            "{}",
+            style(format!("Tests: {} passed, {} failed", passed, failed)).red()
+        );
+    }
+}
diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs
new file mode 100644
index 00000000000..d651c0fa730
--- /dev/null
+++ b/gnd/src/commands/test/schema.rs
@@ -0,0 +1,221 @@
+//! JSON schema types for test files and result types.
+//!
+//! Test files are JSON documents that describe a sequence of mock blockchain
+//! blocks with triggers (log events, block events) and GraphQL assertions to
+//! validate the resulting entity state after indexing.
+//!
+//! ## Test file format
+//!
+//! ```json
+//! {
+//!   "name": "Transfer creates entity",
+//!   "blocks": [
+//!     {
+//!       "number": 1,
+//!       "triggers": [
+//!         {
+//!           "type": "log",
+//!           "address": "0x1234...",
+//!           "event": "Transfer(address indexed from, address indexed to, uint256 value)",
+//!           "params": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "1000" }
+//!         }
+//!       ]
+//!     }
+//!   ],
+//!   "assertions": [
+//!     {
+//!       "query": "{ transfer(id: \"1\") { from to value } }",
+//!       "expected": { "transfer": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "1000" } }
+//!     }
+//!   ]
+//! }
+//! ```
+
+use serde::Deserialize;
+use serde_json::Value;
+use std::path::{Path, PathBuf};
+
+// ============ JSON Input Types ============
+
+/// Top-level test file structure. Each file represents one named test case
+/// with a sequence of blocks to index and assertions to check afterward.
+#[derive(Debug, Clone, Deserialize)]
+pub struct TestFile {
+    /// Human-readable name for this test case (shown in output).
+    pub name: String,
+
+    /// Ordered sequence of blocks to feed through the indexer.
+    /// Blocks are processed sequentially; triggers within each block are
+    /// sorted by graph-node's standard trigger ordering logic.
+    #[serde(default)]
+    pub blocks: Vec<TestBlock>,
+
+    /// GraphQL assertions to run after all blocks have been indexed.
+    /// Each assertion queries the subgraph and compares the result to an expected value.
+    #[serde(default)]
+    pub assertions: Vec<Assertion>,
+}
+
+/// A mock blockchain block containing zero or more triggers.
+#[derive(Debug, Clone, Deserialize)]
+pub struct TestBlock {
+    /// Block number. If omitted, auto-increments starting from `start_block`
+    /// (default 1). Explicit numbers allow gaps (e.g., blocks 1, 5, 100).
+    #[serde(default)]
+    pub number: Option<u64>,
+
+    /// Block hash as hex string (e.g., "0xabc..."). If omitted, generated
+    /// deterministically as `keccak256(block_number)`.
+    #[serde(default)]
+    pub hash: Option<String>,
+
+    /// Unix timestamp. If omitted, defaults to `block_number * 12`
+    /// (simulating 12-second block times).
+    #[serde(default)]
+    pub timestamp: Option<u64>,
+
+    /// Triggers within this block (log events, block events).
+    /// Multiple triggers per block are supported and will be sorted by
+    /// graph-node's trigger ordering (block start -> events by logIndex -> block end).
+    #[serde(default)]
+    pub triggers: Vec<TestTrigger>,
+}
+
+/// A trigger within a block. The `type` field determines the variant.
+///
+/// JSON example for a log trigger:
+/// ```json
+/// { "type": "log", "address": "0x...", "event": "Transfer(...)", "params": {...} }
+/// ```
+///
+/// JSON example for a block trigger:
+/// ```json
+/// { "type": "block" }
+/// ```
+#[derive(Debug, Clone, Deserialize)]
+#[serde(tag = "type", rename_all = "lowercase")]
+pub enum TestTrigger {
+    /// An Ethereum log (event) trigger. This is the most common trigger type.
+    Log(LogTrigger),
+    /// A block-level trigger that fires at the end of block processing.
+    Block(BlockTrigger),
+}
+
+/// A mock Ethereum event log trigger.
+///
+/// The event signature is parsed and parameters are ABI-encoded into the
+/// proper topics (indexed params) and data (non-indexed params) format
+/// that graph-node expects.
+#[derive(Debug, Clone, Deserialize)]
+pub struct LogTrigger {
+    /// Contract address that emitted the event (checksummed or lowercase hex).
+    pub address: String,
+
+    /// Full event signature including parameter names and `indexed` keywords.
+    /// Example: `"Transfer(address indexed from, address indexed to, uint256 value)"`
+    ///
+    /// The signature is parsed to determine:
+    /// - topic0 (keccak256 hash of the canonical signature)
+    /// - Which parameters are indexed (become topics) vs non-indexed (become data)
+    pub event: String,
+
+    /// Event parameter values keyed by name. Values are JSON strings/numbers
+    /// that get converted to the appropriate Solidity type:
+    /// - Addresses: hex string `"0x1234..."`
+    /// - Integers: string `"1000000000000000000"` or number `1000`
+    /// - Booleans: `true` / `false`
+    /// - Bytes: hex string `"0xdeadbeef"`
+    #[serde(default)]
+    pub params: serde_json::Map<String, serde_json::Value>,
+
+    /// Transaction hash. If omitted, generated deterministically as
+    /// `keccak256(block_number || log_index)`.
+    #[serde(default)]
+    pub tx_hash: Option<String>,
+}
+
+/// A block-level trigger. Fires as `EthereumBlockTriggerType::End`,
+/// meaning it runs after all event handlers in the block.
+/// No additional fields needed — the block data comes from the parent TestBlock.
+#[derive(Debug, Clone, Default, Deserialize)]
+pub struct BlockTrigger {}
+
+/// A GraphQL assertion to validate indexed entity state.
+#[derive(Debug, Clone, Deserialize)]
+#[allow(dead_code)]
+pub struct Assertion {
+    /// GraphQL query string. Example: `"{ transfer(id: \"1\") { from to value } }"`
+    pub query: String,
+
+    /// Expected JSON result. Compared against the actual query response.
+    /// Object key order doesn't matter. String-vs-number coercion is applied
+    /// to handle GraphQL's BigInt/BigDecimal string representation.
+    pub expected: Value,
+}
+
+// ============ Result Types ============
+
+/// Outcome of running a single test case.
+#[derive(Debug)]
+pub enum TestResult {
+    /// All assertions passed and no handler errors occurred.
+    Passed,
+    /// The test failed due to handler errors and/or assertion mismatches.
+    Failed {
+        /// If the subgraph handler threw a fatal error during indexing,
+        /// this contains the error message. The test fails immediately
+        /// without running assertions.
+        handler_error: Option<String>,
+        /// List of assertions where actual != expected.
+        assertion_failures: Vec<AssertionFailure>,
+    },
+}
+
+/// Details about a single failed assertion.
+#[derive(Debug)]
+pub struct AssertionFailure {
+    /// The GraphQL query that was executed.
+    pub query: String,
+    /// What the test expected to get back.
+    pub expected: Value,
+    /// What the query actually returned.
+    pub actual: Value,
+}
+
+// ============ Parsing ============
+
+/// Parse a JSON test file from disk into a [`TestFile`].
+pub fn parse_test_file(path: &Path) -> anyhow::Result<TestFile> {
+    let content = std::fs::read_to_string(path)
+        .map_err(|e| anyhow::anyhow!("Failed to read test file {}: {}", path.display(), e))?;
+    serde_json::from_str(&content)
+        .map_err(|e| anyhow::anyhow!("Failed to parse test file {}: {}", path.display(), e))
+}
+
+/// Discover test files in a directory.
+///
+/// Matches `*.json` and `*.test.json` files (non-recursive).
+/// Returns paths sorted alphabetically for deterministic execution order.
+pub fn discover_test_files(dir: &Path) -> anyhow::Result<Vec<PathBuf>> {
+    let mut files = Vec::new();
+
+    if !dir.exists() {
+        return Ok(files);
+    }
+
+    for entry in std::fs::read_dir(dir)? {
+        let entry = entry?;
+        let path = entry.path();
+
+        if path.is_file() {
+            if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
+                if name.ends_with(".test.json") || name.ends_with(".json") {
+                    files.push(path);
+                }
+            }
+        }
+    }
+
+    files.sort();
+    Ok(files)
+}
diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs
new file mode 100644
index 00000000000..31cec3fb39b
--- /dev/null
+++ b/gnd/src/commands/test/trigger.rs
@@ -0,0 +1,412 @@
+//! ABI encoding of test triggers into graph-node's Ethereum trigger types.
+//!
+//! This module converts the human-readable JSON test format (event signatures
+//! with named parameters) into the binary format that graph-node expects:
+//! - Event signatures → topic0 (keccak256 hash)
+//! - Indexed parameters → topics[1..3] (ABI-encoded, 32 bytes each)
+//! - Non-indexed parameters → data (ABI-encoded tuple)
+//!
+//! ## Encoding example
+//!
+//! For `Transfer(address indexed from, address indexed to, uint256 value)`:
+//! - topic0 = keccak256("Transfer(address,address,uint256)")
+//! - topic1 = left-padded `from` address
+//! - topic2 = left-padded `to` address
+//! - data = ABI-encoded `value` (uint256, 32 bytes)
+//!
+//! ## Block construction
+//!
+//! Each test block is converted to a `BlockWithTriggers` containing:
+//! - A `LightEthereumBlock` with proper parent hash chaining
+//! - Dummy transactions for each unique tx hash (graph-node requires
+//!   matching transactions in the block for log processing)
+//! - `EthereumTrigger` variants for each trigger in the test JSON
+
+use super::schema::{LogTrigger, TestFile, TestTrigger};
+use anyhow::{anyhow, Context, Result};
+use graph::blockchain::block_stream::BlockWithTriggers;
+use graph::prelude::alloy::dyn_abi::{DynSolType, DynSolValue};
+use graph::prelude::alloy::json_abi::Event;
+use graph::prelude::alloy::primitives::{keccak256, Address, Bytes, B256, I256, U256};
+use graph::prelude::alloy::rpc::types::Log;
+use graph::prelude::{BlockPtr, LightEthereumBlock};
+use graph_chain_ethereum::chain::BlockFinality;
+use graph_chain_ethereum::trigger::{EthereumBlockTriggerType, EthereumTrigger, LogRef};
+use graph_chain_ethereum::Chain;
+use std::sync::Arc;
+
+/// Convert all blocks from a test file into graph-node's `BlockWithTriggers` format.
+///
+/// Blocks are chained together with proper parent hashes (each block's parent_hash
+/// points to the previous block's hash). Block numbers auto-increment from
+/// `start_block` when not explicitly specified in the test JSON.
+///
+/// The returned blocks can be fed directly into a `StaticStreamBuilder` for indexing.
+pub fn build_blocks_with_triggers(
+    test_file: &TestFile,
+    start_block: u64,
+) -> Result<Vec<BlockWithTriggers<Chain>>> {
+    let mut blocks = Vec::new();
+    let mut current_number = start_block;
+    // Chain blocks together: each block's parent_hash = previous block's hash.
+    let mut parent_hash = B256::ZERO;
+
+    for test_block in &test_file.blocks {
+        // Use explicit block number or auto-increment from the last block.
+        let number = test_block.number.unwrap_or(current_number);
+
+        // Use explicit hash or generate deterministically from block number.
+        let hash = test_block
+            .hash
+            .as_ref()
+            .map(|h| h.parse::<B256>())
+            .transpose()
+            .context("Invalid block hash")?
+            .unwrap_or_else(|| keccak256(number.to_be_bytes()));
+
+        // Default timestamp simulates 12-second block times.
+        let timestamp = test_block.timestamp.unwrap_or(number * 12);
+
+        let mut triggers = Vec::new();
+
+        for (log_index, trigger) in test_block.triggers.iter().enumerate() {
+            match trigger {
+                TestTrigger::Log(log_trigger) => {
+                    let eth_trigger =
+                        build_log_trigger(number, hash, log_index as u64, log_trigger)?;
+                    triggers.push(eth_trigger);
+                }
+                TestTrigger::Block(_) => {
+                    // Block triggers fire at block end, after all event handlers.
+                    triggers.push(EthereumTrigger::Block(
+                        BlockPtr::new(hash.into(), number as i32),
+                        EthereumBlockTriggerType::End,
+                    ));
+                }
+            }
+        }
+
+        let block = create_block_with_triggers(number, hash, parent_hash, timestamp, triggers)?;
+        blocks.push(block);
+
+        // Chain to next block.
+        parent_hash = hash;
+        current_number = number + 1;
+    }
+
+    Ok(blocks)
+}
+
+/// Build a single Ethereum log trigger from a test JSON log trigger.
+///
+/// Creates a fully-formed `EthereumTrigger::Log` with:
+/// - ABI-encoded topics and data from the event signature and parameters
+/// - Block context (hash, number)
+/// - Transaction hash (explicit or deterministic from block_number + log_index)
+fn build_log_trigger(
+    block_number: u64,
+    block_hash: B256,
+    log_index: u64,
+    trigger: &LogTrigger,
+) -> Result<EthereumTrigger> {
+    let address: Address = trigger
+        .address
+        .parse()
+        .context("Invalid contract address")?;
+
+    // Encode the event signature and parameters into EVM log format.
+    let (topics, data) = encode_event_log(&trigger.event, &trigger.params)?;
+
+    // Generate deterministic tx hash if not provided: keccak256(block_number || log_index).
+    // This ensures each log in a block gets a unique tx hash by default.
+    let tx_hash = trigger
+        .tx_hash
+        .as_ref()
+        .map(|h| h.parse::<B256>())
+        .transpose()
+        .context("Invalid tx hash")?
+        .unwrap_or_else(|| {
+            keccak256([block_number.to_be_bytes(), log_index.to_be_bytes()].concat())
+        });
+
+    // Construct the alloy Log type that graph-node's trigger processing expects.
+    let inner_log = graph::prelude::alloy::primitives::Log {
+        address,
+        data: graph::prelude::alloy::primitives::LogData::new_unchecked(topics, data),
+    };
+
+    let full_log = Arc::new(Log {
+        inner: inner_log,
+        block_hash: Some(block_hash),
+        block_number: Some(block_number),
+        block_timestamp: None,
+        transaction_hash: Some(tx_hash),
+        transaction_index: Some(0),
+        log_index: Some(log_index),
+        removed: false,
+    });
+
+    Ok(EthereumTrigger::Log(LogRef::FullLog(full_log, None)))
+}
+
+/// Encode event parameters into EVM log topics and data using `alloy::json_abi::Event::parse()`.
+///
+/// Given a human-readable event signature like:
+/// `"Transfer(address indexed from, address indexed to, uint256 value)"`
+/// and parameter values like:
+/// `{"from": "0xaaaa...", "to": "0xbbbb...", "value": "1000"}`
+///
+/// Produces:
+/// - topics[0] = keccak256("Transfer(address,address,uint256)") (the event selector)
+/// - topics[1] = left-padded `from` address (indexed)
+/// - topics[2] = left-padded `to` address (indexed)
+/// - data = ABI-encoded `value` as uint256 (non-indexed)
+///
+/// Indexed parameters become topics (max 3 after topic0), non-indexed parameters
+/// are ABI-encoded together as the log data.
+pub fn encode_event_log(
+    event_sig: &str,
+    params: &serde_json::Map<String, serde_json::Value>,
+) -> Result<(Vec<B256>, Bytes)> {
+    // Event::parse expects "event EventName(...)" format.
+    // If the user already wrote "event Transfer(...)" use as-is,
+    // otherwise prepend "event ".
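+    // e.g. "Transfer(address,uint256)" becomes "event Transfer(address,uint256)".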
+    let sig_with_prefix = if event_sig.trim_start().starts_with("event ") {
+        event_sig.to_string()
+    } else {
+        format!("event {}", event_sig)
+    };
+
+    let event = Event::parse(&sig_with_prefix)
+        .map_err(|e| anyhow!("Failed to parse event signature '{}': {:?}", event_sig, e))?;
+
+    // topic0 is the event selector (keccak256 of canonical signature)
+    let topic0 = event.selector();
+    let mut topics = vec![topic0];
+    let mut data_values = Vec::new();
+
+    for input in &event.inputs {
+        let value = params
+            .get(&input.name)
+            .ok_or_else(|| anyhow!("Missing parameter: {}", input.name))?;
+
+        let sol_type: DynSolType = input
+            .ty
+            .parse()
+            .map_err(|e| anyhow!("Invalid type '{}': {:?}", input.ty, e))?;
+
+        let sol_value = json_to_sol_value(&sol_type, value)?;
+
+        if input.indexed {
+            let topic = sol_value_to_topic(&sol_value)?;
+            topics.push(topic);
+        } else {
+            data_values.push(sol_value);
+        }
+    }
+
+    let data = if data_values.is_empty() {
+        Bytes::new()
+    } else {
+        let tuple = DynSolValue::Tuple(data_values);
+        Bytes::from(tuple.abi_encode_params())
+    };
+
+    Ok((topics, data))
+}
+
+/// Convert a JSON value to the corresponding Solidity dynamic value type.
+///
+/// Handles the common Solidity types that appear in event parameters:
+/// - `address`: hex string → 20-byte address
+/// - `uint8`..`uint256`: string (decimal/hex) or JSON number → unsigned integer
+/// - `int8`..`int256`: string (decimal/hex, optionally negative) → signed integer (two's complement)
+/// - `bool`: JSON boolean
+/// - `bytes`: hex string → dynamic byte array
+/// - `string`: JSON string
+/// - `bytes1`..`bytes32`: hex string → fixed-length byte array (right-zero-padded to 32 bytes)
+fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Result<DynSolValue> {
+    match sol_type {
+        DynSolType::Address => {
+            let s = value
+                .as_str()
+                .ok_or_else(|| anyhow!("Expected string for address"))?;
+            let addr: Address = s.parse().context("Invalid address")?;
+            Ok(DynSolValue::Address(addr))
+        }
+        DynSolType::Uint(bits) => {
+            let n = match value {
+                // String values support both decimal and "0x"-prefixed hex.
+                serde_json::Value::String(s) => U256::from_str_radix(
+                    s.trim_start_matches("0x"),
+                    if s.starts_with("0x") { 16 } else { 10 },
+                )
+                .context("Invalid uint")?,
+                // JSON numbers are limited to u64 range — use strings for larger values.
+                serde_json::Value::Number(n) => U256::from(n.as_u64().ok_or_else(|| {
+                    anyhow!("uint value {} does not fit in u64, use a string instead", n)
+                })?),
+                _ => return Err(anyhow!("Expected string or number for uint")),
+            };
+            Ok(DynSolValue::Uint(n, *bits))
+        }
+        DynSolType::Int(bits) => {
+            // Signed integers use two's complement representation in U256.
+            // Negative values: negate via !abs + 1 (two's complement).
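+            // e.g. -1 encodes as 0xffff...ffff and -2 as 0xffff...fffe (full 256 bits).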
+            let n = match value {
+                serde_json::Value::String(s) => {
+                    let is_negative = s.starts_with('-');
+                    let s_clean = s.trim_start_matches('-');
+                    let abs = U256::from_str_radix(
+                        s_clean.trim_start_matches("0x"),
+                        if s_clean.starts_with("0x") { 16 } else { 10 },
+                    )
+                    .context("Invalid int")?;
+                    if is_negative {
+                        !abs + U256::from(1) // Two's complement negation
+                    } else {
+                        abs
+                    }
+                }
+                serde_json::Value::Number(n) => {
+                    if let Some(i) = n.as_i64() {
+                        if i < 0 {
+                            // unsigned_abs avoids overflow on i64::MIN.
+                            !U256::from(i.unsigned_abs()) + U256::from(1)
+                        } else {
+                            U256::from(i as u64)
+                        }
+                    } else {
+                        U256::from(n.as_u64().ok_or_else(|| {
+                            anyhow!("int value {} is not an integer, use a string instead", n)
+                        })?)
+                    }
+                }
+                _ => return Err(anyhow!("Expected string or number for int")),
+            };
+            Ok(DynSolValue::Int(I256::from_raw(n), *bits))
+        }
+        DynSolType::Bool => {
+            let b = value.as_bool().ok_or_else(|| anyhow!("Expected bool"))?;
+            Ok(DynSolValue::Bool(b))
+        }
+        DynSolType::Bytes => {
+            let s = value
+                .as_str()
+                .ok_or_else(|| anyhow!("Expected string for bytes"))?;
+            let bytes = hex::decode(s.trim_start_matches("0x")).context("Invalid hex")?;
+            Ok(DynSolValue::Bytes(bytes))
+        }
+        DynSolType::String => {
+            let s = value.as_str().ok_or_else(|| anyhow!("Expected string"))?;
+            Ok(DynSolValue::String(s.to_string()))
+        }
+        DynSolType::FixedBytes(len) => {
+            let s = value
+                .as_str()
+                .ok_or_else(|| anyhow!("Expected string for bytes{}", len))?;
+            let bytes = hex::decode(s.trim_start_matches("0x")).context("Invalid hex")?;
+            if bytes.len() > *len {
+                return Err(anyhow!(
+                    "bytes{}: got {} bytes, expected at most {}",
+                    len,
+                    bytes.len(),
+                    len
+                ));
+            }
+            // DynSolValue::FixedBytes always wraps a B256 (32 bytes) plus the actual
+            // byte count. Right-zero-pad the input to fill the full 32 bytes.
+            let mut padded = [0u8; 32];
+            padded[..bytes.len()].copy_from_slice(&bytes);
+            Ok(DynSolValue::FixedBytes(B256::from(padded), *len))
+        }
+        _ => Err(anyhow!("Unsupported type: {:?}", sol_type)),
+    }
+}
+
+/// Convert a Solidity value to a 32-byte topic for indexed event parameters.
+///
+/// EVM log topics are always exactly 32 bytes. The encoding depends on the type:
+/// - Addresses: left-padded to 32 bytes (12 zero bytes + 20 address bytes)
+/// - Integers: stored as big-endian 32-byte values
+/// - Booleans: 0x00...00 (false) or 0x00...01 (true)
+/// - Fixed bytes: stored directly (already 32 bytes in B256)
+/// - Dynamic types (bytes, string): keccak256-hashed (the value itself is not recoverable)
+fn sol_value_to_topic(value: &DynSolValue) -> Result<B256> {
+    match value {
+        DynSolValue::Address(addr) => {
+            // Addresses are left-padded: 12 zero bytes + 20 address bytes.
+            let mut bytes = [0u8; 32];
+            bytes[12..].copy_from_slice(addr.as_slice());
+            Ok(B256::from(bytes))
+        }
+        DynSolValue::Uint(n, _) => Ok(B256::from(*n)),
+        DynSolValue::Int(n, _) => Ok(B256::from(n.into_raw())),
+        DynSolValue::Bool(b) => {
+            let mut bytes = [0u8; 32];
+            if *b {
+                bytes[31] = 1;
+            }
+            Ok(B256::from(bytes))
+        }
+        DynSolValue::FixedBytes(b, _) => Ok(*b),
+        // Dynamic types are hashed per Solidity spec — the original value
+        // cannot be recovered from the topic.
+        DynSolValue::Bytes(b) => Ok(keccak256(b)),
+        DynSolValue::String(s) => Ok(keccak256(s.as_bytes())),
+        _ => Err(anyhow!("Cannot convert {:?} to topic", value)),
+    }
+}
+
+/// Create a `BlockWithTriggers` from block metadata and triggers.
+///
+/// Constructs a minimal but valid Ethereum block including:
+/// - Block header with number, hash, parent_hash, and timestamp
+/// - Dummy transactions for each unique tx hash referenced by log triggers
+///   (graph-node requires matching transactions in the block body)
+/// - The triggers themselves, which get sorted by graph-node's ordering logic
fn create_block_with_triggers(
+    number: u64,
+    hash: B256,
+    parent_hash: B256,
+    timestamp: u64,
+    triggers: Vec<EthereumTrigger>,
+) -> Result<BlockWithTriggers<Chain>> {
+    use graph::prelude::alloy::rpc::types::BlockTransactions;
+    use graph::prelude::{create_dummy_transaction, create_minimal_block_for_test};
+    use std::collections::HashSet;
+
+    // Collect unique transaction hashes from log triggers.
+    // Graph-node looks up the transaction by hash during log processing,
+    // so we need corresponding dummy transactions in the block body.
+    let mut tx_hashes: HashSet<B256> = HashSet::new();
+    for trigger in &triggers {
+        if let EthereumTrigger::Log(LogRef::FullLog(log, _)) = trigger {
+            if let Some(tx_hash) = log.transaction_hash {
+                tx_hashes.insert(tx_hash);
+            }
+        }
+    }
+
+    let transactions: Vec<_> = tx_hashes
+        .into_iter()
+        .enumerate()
+        .map(|(idx, tx_hash)| create_dummy_transaction(number, hash, Some(idx as u64), tx_hash))
+        .collect();
+
+    // Build a minimal block with our hash/parent_hash/timestamp and attach transactions.
+    let alloy_block = create_minimal_block_for_test(number, hash)
+        .map_header(|mut header| {
+            header.inner.parent_hash = parent_hash;
+            header.inner.timestamp = timestamp;
+            header
+        })
+        .with_transactions(BlockTransactions::Full(transactions));
+
+    let light_block = LightEthereumBlock::new(alloy_block.into());
+    let finality_block = BlockFinality::Final(Arc::new(light_block));
+
+    // BlockWithTriggers::new automatically sorts triggers by graph-node's
+    // standard ordering (block start → events by logIndex → block end).
+    Ok(BlockWithTriggers::new(
+        finality_block,
+        triggers,
+        &graph::log::logger(false),
+    ))
+}
From 01d1a76bef3ef8a8590a18a89750d4693de7ecca Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Thu, 5 Feb 2026 23:47:39 +0200
Subject: [PATCH 03/34] gnd(test): Update module structure with new submodules

Adds module declarations for refactored components:
- mod assertion
- mod block_stream
- mod noop

Updates module documentation to reflect the new structure and improved
separation of concerns.
---
 gnd/src/commands/test/mod.rs | 402 +++++++++++++++++++++++++++++
 1 file changed, 402 insertions(+)
 create mode 100644 gnd/src/commands/test/mod.rs

diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs
new file mode 100644
index 00000000000..61065adb310
--- /dev/null
+++ b/gnd/src/commands/test/mod.rs
@@ -0,0 +1,402 @@
+//! Mock-based subgraph test runner for `gnd test`.
+//!
+//! This module replaces the old Matchstick-only test command with a mock-based
+//! integration testing solution. Tests are defined as JSON files containing mock
+//! blockchain data (blocks, log events, block triggers) and GraphQL assertions
+//! that validate the resulting entity state.
+//!
+//! ## How it works
+//!
+//! 1. Build the subgraph (unless `--skip-build`)
+//! 2. Discover `*.json` / `*.test.json` files in the test directory
+//! 3. For each test file:
+//!    a. Parse JSON into mock blocks with triggers
+//!    b. Spin up a temporary PostgreSQL database (pgtemp on Unix)
+//!    c. Initialize graph-node stores and deploy the subgraph
+//!    d. Feed mock blocks through a static block stream (no real RPC)
+//!    e. Wait for the indexer to process all blocks
+//!    f. Run GraphQL assertions against the indexed data
+//! 4. Report pass/fail results
+//!
+//! The key insight is that we reuse real graph-node infrastructure (stores,
+//! subgraph deployment, WASM runtime) and only mock the blockchain layer.
+//! This means tests exercise the same code paths as production indexing.
+//!
+//! ## Legacy mode
+//!
+//! The `--matchstick` flag falls back to the external Matchstick test runner
+//! for backward compatibility with existing test suites.
+//!
+//! ## Module structure
+//!
+//! - [`schema`]: JSON input types (TestFile, TestBlock, etc.) and result types
+//! - [`trigger`]: ABI encoding of event parameters into Ethereum log triggers
+//! - [`mock_chain`]: Helpers for block pointer construction
+//! - [`runner`]: Test execution orchestration (store setup, indexing, sync)
+//! - [`assertion`]: GraphQL assertion execution and JSON comparison
+//! - [`block_stream`]: Mock block stream that feeds pre-built blocks
+//! - [`noop`]: Noop/stub trait implementations for the mock chain
+//! - [`output`]: Console output formatting for test results

+mod assertion;
+mod block_stream;
+mod mock_chain;
+mod noop;
+mod output;
+mod runner;
+mod schema;
+mod trigger;
+
+use anyhow::{anyhow, Context, Result};
+use clap::Parser;
+use console::style;
+use std::path::PathBuf;
+
+pub use schema::TestResult;
+
+use crate::output::{step, Step};
+
+#[derive(Clone, Debug, Parser)]
+#[clap(about = "Run subgraph tests")]
+pub struct TestOpt {
+    /// Path to subgraph manifest
+    #[clap(default_value = "subgraph.yaml")]
+    pub manifest: PathBuf,
+
+    /// Test files directory
+    #[clap(short = 't', long, default_value = "tests")]
+    pub test_dir: PathBuf,
+
+    /// Skip building the subgraph before testing
+    #[clap(long)]
+    pub skip_build: bool,
+
+    /// PostgreSQL connection URL. If not provided, a temporary database will be created (Unix only).
+    #[clap(long, env = "POSTGRES_URL")]
+    pub postgres_url: Option<String>,
+
+    /// Use Matchstick runner instead (legacy mode)
+    #[clap(long)]
+    pub matchstick: bool,
+
+    /// Run Matchstick tests in Docker (recommended on macOS, where the native binary has known issues)
+    #[clap(short = 'd', long, requires = "matchstick")]
+    pub docker: bool,
+
+    /// Run tests with coverage reporting (Matchstick only)
+    #[clap(short = 'c', long, requires = "matchstick")]
+    pub coverage: bool,
+
+    /// Force recompilation of tests (Matchstick only)
+    #[clap(short = 'r', long, requires = "matchstick")]
+    pub recompile: bool,
+
+    /// Force redownload of Matchstick binary / rebuild Docker image
+    #[clap(short = 'f', long, requires = "matchstick")]
+    pub force: bool,
+
+    /// Matchstick version to use (default: 0.6.0)
+    #[clap(long, requires = "matchstick")]
+    pub matchstick_version: Option<String>,
+
+    /// Specific data source to test (Matchstick only)
+    #[clap(long, requires = "matchstick")]
+    pub datasource: Option<String>,
+}
+
+/// Entry point for the `gnd test` command.
+///
+/// Orchestrates the full test lifecycle: build -> discover -> run -> report.
+/// Each test file gets its own isolated database and subgraph deployment.
+/// Returns an error if any tests fail (for non-zero exit code).
+pub async fn run_test(opt: TestOpt) -> Result<()> {
+    if opt.matchstick {
+        return run_matchstick_tests(&opt);
+    }
+
+    // Build the subgraph first so the WASM and schema are available in build/.
+    // This mirrors what a user would do manually before running tests.
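+    // With --skip-build, the existing build/ output is used as-is and is
+    // assumed to be up to date.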
+ if !opt.skip_build { + step(Step::Generate, "Building subgraph"); + let build_opt = crate::commands::BuildOpt { + manifest: opt.manifest.clone(), + output_dir: std::path::PathBuf::from("build"), + output_format: "wasm".to_string(), + skip_migrations: false, + watch: false, + ipfs: None, + network: None, + network_file: std::path::PathBuf::from("networks.json"), + skip_asc_version_check: false, + }; + crate::commands::run_build(build_opt).await?; + step(Step::Done, "Build complete"); + } + + // Find all test JSON files in the test directory (sorted for deterministic order). + step(Step::Load, "Discovering test files"); + let test_files = schema::discover_test_files(&opt.test_dir)?; + + if test_files.is_empty() { + step(Step::Warn, "No test files found"); + println!( + " Looking in: {}", + opt.test_dir + .canonicalize() + .unwrap_or(opt.test_dir.clone()) + .display() + ); + println!(" Expected: *.test.json or *.json files"); + return Ok(()); + } + + let mut passed = 0; + let mut failed = 0; + + for path in test_files { + output::print_test_start(&path); + + // Parse the JSON test file into our schema types. + let test_file = match schema::parse_test_file(&path) { + Ok(tf) => tf, + Err(e) => { + println!(" {} Failed to parse: {}", style("✘").red(), e); + failed += 1; + continue; + } + }; + + // Run the test: set up infra, index blocks, check assertions. + // Each test gets a fresh database so tests are fully isolated. + match runner::run_single_test(&opt, &test_file).await { + Ok(result) => { + output::print_test_result(&test_file.name, &result); + match result { + TestResult::Passed => passed += 1, + TestResult::Failed { .. } => failed += 1, + } + } + Err(e) => { + println!(" {} {} - Error: {}", style("✘").red(), test_file.name, e); + failed += 1; + } + } + } + + output::print_summary(passed, failed); + + if failed > 0 { + Err(anyhow!("{} test(s) failed", failed)) + } else { + Ok(()) + } +} + +/// Backward-compatible Matchstick test runner. +/// +/// Dispatches to Docker mode or binary mode depending on the `--docker` flag. +/// This is the legacy path for projects that haven't migrated to the new +/// JSON-based test format yet. +fn run_matchstick_tests(opt: &TestOpt) -> Result<()> { + if opt.docker { + run_docker_tests(opt) + } else { + run_binary_tests(opt) + } +} + +/// Run Matchstick tests using a locally installed binary. +/// +/// Searches for the Matchstick binary in well-known locations and executes it, +/// passing through any relevant CLI flags. +fn run_binary_tests(opt: &TestOpt) -> Result<()> { + step(Step::Generate, "Running Matchstick tests (legacy mode)"); + + let path = find_matchstick().ok_or_else(|| { + anyhow!( + "Matchstick not found. Please install it with:\n \ + npm install --save-dev matchstick-as\n\n\ + Or use Docker mode:\n \ + gnd test --matchstick -d" + ) + })?; + + let workdir = opt.manifest.parent().unwrap_or(std::path::Path::new(".")); + let mut cmd = std::process::Command::new(&path); + cmd.current_dir(workdir); + + if opt.coverage { + cmd.arg("-c"); + } + if opt.recompile { + cmd.arg("-r"); + } + if let Some(datasource) = &opt.datasource { + cmd.arg(datasource); + } + + let status = cmd + .status() + .with_context(|| format!("Failed to execute Matchstick binary: {}", path))?; + + if status.success() { + step(Step::Done, "Matchstick tests passed"); + Ok(()) + } else { + Err(anyhow!("Matchstick tests failed")) + } +} + +/// Find the Matchstick binary by searching well-known locations and PATH. +/// +/// Search order: +/// 1. 
`node_modules/.bin/graph-test`
+/// 2. `node_modules/.bin/matchstick`
+/// 3. `node_modules/matchstick-as/bin/matchstick`
+/// 4. `graph-test` on PATH
+/// 5. `matchstick` on PATH
+fn find_matchstick() -> Option<String> {
+    let local_paths = [
+        "node_modules/.bin/graph-test",
+        "node_modules/.bin/matchstick",
+        "node_modules/matchstick-as/bin/matchstick",
+    ];
+
+    local_paths
+        .iter()
+        .find(|p| std::path::Path::new(p).exists())
+        .map(|p| p.to_string())
+        .or_else(|| {
+            which::which("graph-test")
+                .ok()
+                .map(|p| p.to_string_lossy().into_owned())
+        })
+        .or_else(|| {
+            which::which("matchstick")
+                .ok()
+                .map(|p| p.to_string_lossy().into_owned())
+        })
+}
+
+/// Run Matchstick tests inside a Docker container.
+///
+/// This is the recommended mode on macOS where the native Matchstick binary
+/// has known issues. The Docker image is built automatically if it doesn't
+/// exist or if `--force` is specified.
+fn run_docker_tests(opt: &TestOpt) -> Result<()> {
+    step(Step::Generate, "Running Matchstick tests in Docker");
+
+    std::process::Command::new("docker")
+        .arg("--version")
+        .output()
+        .context("Docker not found. Please install Docker to use -d/--docker mode.")?;
+
+    let mut test_args = String::new();
+    if opt.coverage {
+        test_args.push_str(" -c");
+    }
+    if opt.recompile {
+        test_args.push_str(" -r");
+    }
+    if let Some(datasource) = &opt.datasource {
+        test_args.push_str(&format!(" {}", datasource));
+    }
+
+    let cwd = std::env::current_dir().context("Failed to get current directory")?;
+
+    let mut cmd = std::process::Command::new("docker");
+    cmd.args([
+        "run",
+        "-it",
+        "--rm",
+        "--mount",
+        &format!("type=bind,source={},target=/matchstick", cwd.display()),
+    ]);
+    if !test_args.is_empty() {
+        cmd.args(["-e", &format!("ARGS={}", test_args.trim())]);
+    }
+    cmd.arg("matchstick");
+
+    // Check if the Docker image already exists.
+    let image_check = std::process::Command::new("docker")
+        .args(["images", "-q", "matchstick"])
+        .output()
+        .context("Failed to check for Docker image")?;
+    let image_exists = !image_check.stdout.is_empty();
+
+    if !image_exists || opt.force {
+        step(Step::Generate, "Building Matchstick Docker image");
+        let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile");
+        if !dockerfile_path.exists() || opt.force {
+            create_dockerfile(&dockerfile_path, opt.matchstick_version.as_deref())?;
+        }
+        let build_status = std::process::Command::new("docker")
+            .args([
+                "build",
+                "-f",
+                &dockerfile_path.to_string_lossy(),
+                "-t",
+                "matchstick",
+                ".",
+            ])
+            .status()
+            .context("Failed to build Docker image")?;
+        if !build_status.success() {
+            return Err(anyhow!("Failed to build Matchstick Docker image"));
+        }
+    }
+
+    let status = cmd.status().context("Failed to run Docker container")?;
+    if status.success() {
+        step(Step::Done, "Tests passed");
+        Ok(())
+    } else {
+        Err(anyhow!("Tests failed"))
+    }
+}
+
+/// Create a Dockerfile for running Matchstick tests in a container.
+///
+/// The Dockerfile downloads the Matchstick binary directly from GitHub releases
+/// (not npm — `matchstick-as` is the AssemblyScript library, not the runner binary).
+/// Based on the upstream Matchstick Dockerfile.
+fn create_dockerfile(path: &PathBuf, version: Option<&str>) -> Result<()> { + use std::fs; + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let version = version.unwrap_or("0.6.0"); + let dockerfile_content = format!( + r#"FROM --platform=linux/x86_64 ubuntu:22.04 + +ARG DEBIAN_FRONTEND=noninteractive +ENV ARGS="" + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + curl ca-certificates postgresql postgresql-contrib \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL -o /usr/local/bin/matchstick \ + https://github.com/LimeChain/matchstick/releases/download/{version}/binary-linux-22 \ + && chmod +x /usr/local/bin/matchstick + +RUN mkdir /matchstick +WORKDIR /matchstick + +CMD ["sh", "-c", "matchstick $ARGS"] +"#, + version = version + ); + + fs::write(path, dockerfile_content) + .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; + step(Step::Write, &format!("Created {}", path.display())); + Ok(()) +} From e69ba88d6b51de9ea8d82b9cdd330b880944e9c8 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 5 Feb 2026 23:47:09 +0200 Subject: [PATCH 04/34] gnd(test): Refactor runner.rs for readability - remove extracted code Removes ~500 lines from runner.rs by delegating to new focused modules: - block_stream: Mock block delivery infrastructure - noop: Stub trait implementations - assertion: GraphQL assertion logic runner.rs now focuses exclusively on test orchestration: - setup_stores: Initialize PostgreSQL and chain store - setup_chain: Construct mock Ethereum chain - setup_context: Wire up graph-node components - wait_for_sync: Poll store until indexing completes Reduced from 1198 to 729 lines (39% reduction). Improves readability by separating concerns. --- gnd/src/commands/test/runner.rs | 728 ++++++++++++++++++++++++++++++++ 1 file changed, 728 insertions(+) create mode 100644 gnd/src/commands/test/runner.rs diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs new file mode 100644 index 00000000000..c99f1316994 --- /dev/null +++ b/gnd/src/commands/test/runner.rs @@ -0,0 +1,728 @@ +//! Test runner: orchestrates subgraph indexing with mock blockchain data. +//! +//! This is the core of `gnd test`. For each test file, it: +//! +//! 1. Creates a temporary PostgreSQL database (pgtemp) for complete test isolation +//! 2. Initializes graph-node stores (entity storage, block storage, chain store) +//! 3. Constructs a mock Ethereum chain that feeds pre-defined blocks +//! 4. Deploys the subgraph and starts the indexer +//! 5. Waits for all blocks to be processed (or a fatal error) +//! 6. Runs GraphQL assertions against the indexed entity state +//! +//! ## Architecture +//! +//! The runner reuses real graph-node infrastructure — the same store, WASM runtime, +//! and trigger processing code used in production. Only the blockchain layer is +//! mocked via `StaticStreamBuilder` (see [`super::block_stream`]), which feeds +//! pre-built `BlockWithTriggers` from the test JSON instead of fetching from an +//! RPC endpoint. +//! +//! This approach follows the same pattern as `gnd dev`, which also uses +//! `FileLinkResolver` and filesystem-based deployment hashes instead of IPFS. +//! +//! Noop/stub adapters (see [`super::noop`]) satisfy the `Chain` constructor's +//! trait bounds without making real network calls. 
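+//!
+//! As a rough sketch (eliding error handling and setup details), the per-test
+//! flow in [`run_single_test`] boils down to:
+//!
+//! ```text
+//! let blocks = build_blocks_with_triggers(&test_file, 1)?;      // JSON -> BlockWithTriggers
+//! let stores = setup_stores(&logger, &db_url, &network_name).await?;
+//! let chain  = setup_chain(&test_file.name, blocks.clone(), &stores).await?;
+//! let ctx    = setup_context(&logger, &stores, &chain, ...).await?;
+//! ctx.provider.clone().start_subgraph(ctx.deployment.clone(), Some(stop_block.number)).await;
+//! wait_for_sync(&logger, ctx.store.clone(), &ctx.deployment, stop_block).await?;
+//! run_assertions(&ctx, &test_file.assertions).await
+//! ```
+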
+use super::assertion::run_assertions;
+use super::block_stream::{MutexBlockStreamBuilder, StaticStreamBuilder};
+use super::mock_chain;
+use super::noop::{NoopAdapterSelector, NoopRuntimeAdapterBuilder, StaticBlockRefetcher};
+use super::schema::{TestFile, TestResult};
+use super::trigger::build_blocks_with_triggers;
+use super::TestOpt;
+use anyhow::{anyhow, Context, Result};
+use graph::amp::FlightClient;
+use graph::blockchain::block_stream::BlockWithTriggers;
+use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier};
+use graph::cheap_clone::CheapClone;
+use graph::components::link_resolver::{ArweaveClient, FileLinkResolver};
+use graph::components::metrics::MetricsRegistry;
+use graph::components::network_provider::ChainName;
+use graph::components::store::DeploymentLocator;
+use graph::components::subgraph::{Settings, SubgraphInstanceManager as _};
+use graph::data::graphql::load_manager::LoadManager;
+use graph::data::subgraph::schema::SubgraphError;
+use graph::endpoint::EndpointMetrics;
+use graph::env::EnvVars;
+use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit};
+use graph::ipfs::{IpfsMetrics, IpfsRpcClient, ServerAddress};
+use graph::prelude::{
+    DeploymentHash, LoggerFactory, NodeId, SubgraphCountMetric, SubgraphName, SubgraphRegistrar,
+    SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode,
+};
+use graph::slog::{info, o, Drain, Logger, OwnedKVList, Record};
+use graph_chain_ethereum::network::EthereumNetworkAdapters;
+use graph_chain_ethereum::Chain;
+use graph_core::polling_monitor::{arweave_service, ipfs_service};
+use graph_graphql::prelude::GraphQlRunner;
+use graph_node::config::Config;
+use graph_node::manager::PanicSubscriptionManager;
+use graph_node::store_builder::StoreBuilder;
+use graph_store_postgres::{ChainHeadUpdateListener, ChainStore, Store, SubgraphStore};
+use std::marker::PhantomData;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use std::time::{Duration, Instant};
+
+#[cfg(unix)]
+use pgtemp::PgTempDBBuilder;
+
+/// Node ID used for all test deployments. Visible in store metadata.
+const NODE_ID: &str = "gnd-test";
+
+// ============ Test Infrastructure Types ============
+
+/// A slog drain that suppresses the "Store event stream ended" error message.
+///
+/// When a test completes and the pgtemp database is dropped, the store's
+/// background subscription listener loses its connection and logs an error.
+/// This is expected during cleanup and not a real problem, so we filter it
+/// out to avoid confusing test output. All other log messages pass through.
+struct FilterStoreEventEndedDrain<D> {
+    inner: D,
+}
+
+impl<D: Drain> Drain for FilterStoreEventEndedDrain<D> {
+    type Ok = Option<D::Ok>;
+    type Err = D::Err;
+
+    fn log(&self, record: &Record, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
+        if record
+            .msg()
+            .to_string()
+            .contains("Store event stream ended")
+        {
+            return Ok(None);
+        }
+        self.inner.log(record, values).map(Some)
+    }
+}
+
+/// Bundles the store infrastructure needed for test execution.
+///
+/// Created once per test and holds the connection pools, chain store,
+/// and chain head listener that the indexer needs.
+struct TestStores {
+    /// Network name from the subgraph manifest (e.g., "mainnet").
+    /// Must match the chain config so graph-node routes triggers correctly.
+    network_name: ChainName,
+    /// Listens for chain head updates — needed by the Chain constructor.
+    chain_head_listener: Arc<ChainHeadUpdateListener>,
+    /// The top-level store (wraps subgraph store + block store).
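+    /// Both entity data and chain data are accessed through it.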
+    network_store: Arc<Store>,
+    /// Per-chain block storage.
+    chain_store: Arc<ChainStore>,
+}
+
+/// All the pieces needed to run a test after infrastructure setup.
+///
+/// Holds references to the subgraph provider (for starting indexing),
+/// the store (for querying sync status), the deployment locator,
+/// and the GraphQL runner (for assertions).
+pub(super) struct TestContext {
+    #[allow(dead_code)]
+    pub(super) logger: Logger,
+    /// Starts/stops subgraph indexing.
+    pub(super) provider: Arc<graph_core::SubgraphInstanceManager<SubgraphStore>>,
+    /// Used to check sync progress and health status.
+    pub(super) store: Arc<SubgraphStore>,
+    /// Identifies this specific subgraph deployment in the store.
+    pub(super) deployment: DeploymentLocator,
+    /// Executes GraphQL queries against the indexed data.
+    pub(super) graphql_runner: Arc<GraphQlRunner<Store>>,
+}
+
+// ============ Test Execution ============
+
+/// Extract the network name (e.g., "mainnet") from the first data source in a manifest.
+///
+/// The network name must match the chain configuration passed to the store,
+/// otherwise graph-node won't route triggers to the correct chain.
+/// Falls back to "mainnet" if not found (the common case for Ethereum subgraphs).
+fn extract_network_from_manifest(manifest_path: &Path) -> Result<String> {
+    let content = std::fs::read_to_string(manifest_path)
+        .with_context(|| format!("Failed to read manifest: {}", manifest_path.display()))?;
+    let manifest: serde_yaml::Value = serde_yaml::from_str(&content)
+        .with_context(|| format!("Failed to parse manifest: {}", manifest_path.display()))?;
+
+    let network = manifest
+        .get("dataSources")
+        .and_then(|ds| ds.as_sequence())
+        .and_then(|seq| seq.first())
+        .and_then(|first| first.get("network"))
+        .and_then(|n| n.as_str())
+        .map(|s| s.to_string())
+        .unwrap_or_else(|| "mainnet".to_string());
+
+    Ok(network)
+}
+
+/// Run a single test file end-to-end.
+///
+/// This is the main entry point called from `mod.rs` for each test file.
+/// It creates isolated infrastructure (database, stores, chain), indexes
+/// the mock blocks, and checks the GraphQL assertions.
+///
+/// Returns `TestResult::Passed` if all assertions match, or `TestResult::Failed`
+/// with details about handler errors or assertion mismatches.
+pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result<TestResult> {
+    // Convert test JSON blocks into graph-node's internal block format.
+    let blocks = build_blocks_with_triggers(test_file, 1)?;
+
+    // Empty test with no blocks and no assertions is trivially passing.
+    if blocks.is_empty() && test_file.assertions.is_empty() {
+        return Ok(TestResult::Passed);
+    }
+
+    // Resolve paths relative to the manifest location.
+    let manifest_dir = opt
+        .manifest
+        .parent()
+        .map(|p| p.to_path_buf())
+        .unwrap_or_else(|| PathBuf::from("."));
+
+    // The build directory contains compiled WASM, schema, and the built manifest.
+    // Created by `gnd build` (which runs automatically unless --skip-build).
+    let build_dir = manifest_dir.join("build");
+
+    let manifest_filename = opt
+        .manifest
+        .file_name()
+        .and_then(|s| s.to_str())
+        .unwrap_or("subgraph.yaml");
+    let built_manifest_path = build_dir.join(manifest_filename);
+    let built_manifest_path = built_manifest_path.canonicalize().with_context(|| {
+        format!(
+            "Built manifest not found: {}",
+            built_manifest_path.display()
+        )
+    })?;
+
+    // The network name from the manifest (e.g., "mainnet") determines which
+    // chain config the store uses. Must match exactly.
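+    // e.g. a manifest with `network: mainnet` needs the `[chains.mainnet]`
+    // entry that setup_stores writes into the config below.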
+ let network_name: ChainName = extract_network_from_manifest(&built_manifest_path)?.into(); + + // Create a temporary database for this test. The `_temp_db` handle must + // be kept alive for the duration of the test — dropping it destroys the database. + let (db_url, _temp_db) = get_database_url(opt, &build_dir)?; + + let logger = graph::log::logger(false).new(o!("test" => test_file.name.clone())); + + // Initialize stores with the network name from the manifest. + let stores = setup_stores(&logger, &db_url, &network_name).await?; + + // Create the mock Ethereum chain that will feed our pre-built blocks. + let chain = setup_chain(&test_file.name, blocks.clone(), &stores).await?; + + // Use the built manifest path as the deployment hash, matching gnd dev's pattern. + // FileLinkResolver resolves the hash back to the filesystem path when loading. + let deployment_id = built_manifest_path.display().to_string(); + let hash = DeploymentHash::new(&deployment_id).map_err(|_| { + anyhow!( + "Failed to create deployment hash from path: {}", + deployment_id + ) + })?; + + // Sanitize test name for use as a subgraph name (alphanumeric + hyphens + underscores). + let test_name_sanitized = test_file + .name + .chars() + .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '_') + .collect::(); + let subgraph_name = + SubgraphName::new(format!("test/{}", test_name_sanitized)).map_err(|e| anyhow!("{}", e))?; + + // Wire up all graph-node components (instance manager, provider, registrar, etc.) + // and deploy the subgraph. + let ctx = setup_context( + &logger, + &stores, + &chain, + &build_dir, + hash, + subgraph_name.clone(), + ) + .await?; + + // Determine the target block — the indexer will process until it reaches this. + let stop_block = if blocks.is_empty() { + mock_chain::genesis_ptr() + } else { + mock_chain::final_block_ptr(&blocks).ok_or_else(|| anyhow!("No blocks to process"))? + }; + + // Start the indexer and wait for it to process all blocks. + info!(logger, "Starting subgraph indexing"; "stop_block" => stop_block.number); + + ctx.provider + .clone() + .start_subgraph(ctx.deployment.clone(), Some(stop_block.number)) + .await; + + match wait_for_sync( + &logger, + ctx.store.clone(), + &ctx.deployment, + stop_block.clone(), + ) + .await + { + Ok(()) => { + // Indexing succeeded — now validate the entity state via GraphQL. + run_assertions(&ctx, &test_file.assertions).await + } + Err(subgraph_error) => { + // The subgraph handler threw a fatal error during indexing. + // Report it as a test failure without running assertions. + Ok(TestResult::Failed { + handler_error: Some(subgraph_error.message), + assertion_failures: vec![], + }) + } + } +} + +/// Get a PostgreSQL connection URL for the test. +/// +/// If `--postgres-url` was provided, uses that directly. +/// Otherwise, on Unix, creates a temporary database via pgtemp in the build +/// directory (matching `gnd dev`'s pattern). The database is automatically +/// destroyed when `TempPgHandle` is dropped. +/// +/// On non-Unix systems, `--postgres-url` is required. +fn get_database_url(opt: &TestOpt, build_dir: &Path) -> Result<(String, Option)> { + if let Some(url) = &opt.postgres_url { + return Ok((url.clone(), None)); + } + + #[cfg(unix)] + { + if !build_dir.exists() { + anyhow::bail!( + "Build directory does not exist: {}. 
Run 'gnd build' first.", + build_dir.display() + ); + } + + let db = PgTempDBBuilder::new() + .with_data_dir_prefix(build_dir) + .persist_data(false) + .with_initdb_arg("-E", "UTF8") + .with_initdb_arg("--locale", "C") + .start(); + + let url = db.connection_uri().to_string(); + Ok((url, Some(TempPgHandle(db)))) + } + + #[cfg(not(unix))] + { + let _ = build_dir; + Err(anyhow!( + "On non-Unix systems, please provide --postgres-url" + )) + } +} + +/// RAII handle that keeps a pgtemp database alive for the test's duration. +/// +/// The inner `PgTempDB` is never read directly — its purpose is to prevent +/// the temporary database from being destroyed until this handle is dropped. +#[cfg(unix)] +struct TempPgHandle(#[allow(dead_code)] pgtemp::PgTempDB); + +#[cfg(not(unix))] +struct TempPgHandle; + +/// Initialize graph-node stores from a database URL. +/// +/// Creates: +/// - A TOML config with the database URL and a chain entry for the test network +/// - A `StoreBuilder` that runs database migrations and creates connection pools +/// - A chain store for the test chain with a synthetic genesis block (hash=0x0) +/// +/// Uses a filtered logger to suppress the expected "Store event stream ended" +/// error that occurs when pgtemp is dropped during cleanup. +async fn setup_stores( + _logger: &Logger, + db_url: &str, + network_name: &ChainName, +) -> Result { + // Minimal graph-node config with one primary shard and one chain. + // The chain provider URL is a dummy — no real RPC calls are made. + let config_str = format!( + r#" +[store] +[store.primary] +connection = "{}" +pool_size = 10 + +[deployment] +[[deployment.rule]] +store = "primary" +indexers = [ "default" ] + +[chains] +ingestor = "default" + +[chains.{}] +shard = "primary" +provider = [ + {{ label = "test", url = "http://localhost:1/", features = [] }} +] +"#, + db_url, network_name + ); + + let config = Config::from_str(&config_str, "default") + .map_err(|e| anyhow!("Failed to parse config: {}", e))?; + + let mock_registry = Arc::new(MetricsRegistry::mock()); + let node_id = NodeId::new(NODE_ID).unwrap(); + + // Filter out the "Store event stream ended" error that fires during + // cleanup when pgtemp drops the database out from under the listener. + let base_logger = graph::log::logger(false); + let filtered_drain = FilterStoreEventEndedDrain { + inner: base_logger.clone(), + }; + let store_logger = Logger::root(filtered_drain.fuse(), o!()); + + // StoreBuilder runs migrations and creates connection pools. + let store_builder = StoreBuilder::new( + &store_logger, + &node_id, + &config, + None, + mock_registry.clone(), + ) + .await; + + let chain_head_listener = store_builder.chain_head_update_listener(); + let network_identifiers: Vec = vec![network_name.clone()]; + let network_store = store_builder.network_store(network_identifiers).await; + + // Synthetic chain identifier — net_version "1" with zero genesis hash. + let ident = ChainIdentifier { + net_version: "1".into(), + genesis_block_hash: graph::prelude::alloy::primitives::B256::ZERO.into(), + }; + + let chain_store = network_store + .block_store() + .create_chain_store(network_name, ident) + .await + .context("Failed to create chain store")?; + + Ok(TestStores { + network_name: network_name.clone(), + chain_head_listener, + network_store, + chain_store, + }) +} + +/// Construct a mock Ethereum `Chain` with pre-built blocks. 
+/// +/// The chain uses: +/// - `StaticStreamBuilder`: feeds pre-defined blocks instead of RPC/Firehose +/// - `NoopAdapterSelector` / `NoopRuntimeAdapterBuilder`: stubs for unused interfaces +/// - `StaticBlockRefetcher`: no-op since there are no reorgs in tests +/// - A dummy firehose endpoint (never actually connected to) +async fn setup_chain( + test_name: &str, + blocks: Vec>, + stores: &TestStores, +) -> Result> { + let logger = graph::log::logger(false).new(o!("test" => test_name.to_string())); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); + + // Dummy firehose endpoint — required by Chain constructor but never used. + // Uses 0.0.0.0:0 to prevent accidental DNS lookups if the endpoint is ever reached. + let firehose_endpoints = FirehoseEndpoints::for_testing(vec![Arc::new(FirehoseEndpoint::new( + "", + "http://0.0.0.0:0", + None, + None, + true, + false, + SubgraphLimit::Unlimited, + Arc::new(EndpointMetrics::mock()), + ))]); + + let client = + Arc::new(graph::blockchain::client::ChainClient::::new_firehose(firehose_endpoints)); + + let static_block_stream = Arc::new(StaticStreamBuilder { chain: blocks }); + let block_stream_builder = Arc::new(MutexBlockStreamBuilder(Mutex::new(static_block_stream))); + + let eth_adapters = Arc::new(EthereumNetworkAdapters::empty_for_testing()); + + let chain = Chain::new( + logger_factory, + stores.network_name.clone(), + mock_registry, + stores.chain_store.cheap_clone(), + stores.chain_store.cheap_clone(), + client, + stores.chain_head_listener.cheap_clone(), + block_stream_builder, + Arc::new(StaticBlockRefetcher { + _phantom: PhantomData, + }), + Arc::new(NoopAdapterSelector { + _phantom: PhantomData, + }), + Arc::new(NoopRuntimeAdapterBuilder), + eth_adapters, + graph::prelude::ENV_VARS.reorg_threshold(), + graph::prelude::ENV_VARS.ingestor_polling_interval, + true, + ); + + Ok(Arc::new(chain)) +} + +/// Wire up all graph-node components and deploy the subgraph. +/// +/// This mirrors what `gnd dev` does via the launcher, but assembled directly: +/// 1. Clean up any leftover deployment from a previous run +/// 2. Create blockchain map (just our mock chain) +/// 3. Set up link resolver (FileLinkResolver for local filesystem) +/// 4. Create the subgraph instance manager (WASM runtime, trigger processing) +/// 5. Create the subgraph provider (lifecycle management) +/// 6. Create the GraphQL runner (for assertions) +/// 7. Register and deploy the subgraph via the registrar +async fn setup_context( + logger: &Logger, + stores: &TestStores, + chain: &Arc, + build_dir: &Path, + hash: DeploymentHash, + subgraph_name: SubgraphName, +) -> Result { + let env_vars = Arc::new(EnvVars::from_env().unwrap_or_default()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); + let node_id = NodeId::new(NODE_ID).unwrap(); + + let subgraph_store = stores.network_store.subgraph_store(); + + // Remove any leftover deployment from a previous test run (idempotent). + cleanup(&subgraph_store, &subgraph_name, &hash).await.ok(); + + // Map the network name to our mock chain so graph-node routes triggers correctly. 
+ let mut blockchain_map = BlockchainMap::new(); + blockchain_map.insert(stores.network_name.clone(), chain.clone()); + let blockchain_map = Arc::new(blockchain_map); + + // FileLinkResolver loads the manifest and WASM from the build directory + // instead of fetching from IPFS. This matches gnd dev's approach. + let link_resolver: Arc = + Arc::new(FileLinkResolver::with_base_dir(build_dir)); + + // IPFS client is required by the instance manager constructor but not used + // for manifest loading (FileLinkResolver handles that). + let ipfs_metrics = IpfsMetrics::new(&mock_registry); + let ipfs_client = Arc::new( + IpfsRpcClient::new_unchecked(ServerAddress::local_rpc_api(), ipfs_metrics, logger) + .context("Failed to create IPFS client")?, + ); + + let ipfs_service = ipfs_service( + ipfs_client, + env_vars.mappings.max_ipfs_file_bytes, + env_vars.mappings.ipfs_timeout, + env_vars.mappings.ipfs_request_limit, + ); + + let arweave_resolver = Arc::new(ArweaveClient::default()); + let arweave_service = arweave_service( + arweave_resolver.cheap_clone(), + env_vars.mappings.ipfs_request_limit, + graph::components::link_resolver::FileSizeLimit::MaxBytes( + env_vars.mappings.max_ipfs_file_bytes as u64, + ), + ); + + let sg_count = Arc::new(SubgraphCountMetric::new(mock_registry.cheap_clone())); + let static_filters = env_vars.experimental_static_filters; + + // The instance manager handles WASM compilation, trigger processing, + // and entity storage for running subgraphs. + let subgraph_instance_manager = Arc::new(graph_core::subgraph::SubgraphInstanceManager::< + SubgraphStore, + FlightClient, + >::new( + &logger_factory, + env_vars.cheap_clone(), + subgraph_store.clone(), + blockchain_map.clone(), + sg_count.cheap_clone(), + mock_registry.clone(), + link_resolver.cheap_clone(), + ipfs_service, + arweave_service, + None, + static_filters, + )); + + // The provider manages subgraph lifecycle (start/stop indexing). + let mut subgraph_instance_managers = + graph_core::subgraph_provider::SubgraphInstanceManagers::new(); + subgraph_instance_managers.add( + graph_core::subgraph_provider::SubgraphProcessingKind::Trigger, + subgraph_instance_manager.cheap_clone(), + ); + + let subgraph_provider = Arc::new(graph_core::subgraph_provider::SubgraphProvider::new( + &logger_factory, + sg_count.cheap_clone(), + subgraph_store.clone(), + link_resolver.cheap_clone(), + tokio_util::sync::CancellationToken::new(), + subgraph_instance_managers, + )); + + // GraphQL runner for executing assertion queries against indexed data. + let load_manager = LoadManager::new(logger, Vec::new(), Vec::new(), mock_registry.clone()); + let graphql_runner = Arc::new(GraphQlRunner::new( + logger, + stores.network_store.clone(), + Arc::new(load_manager), + mock_registry.clone(), + )); + + // The registrar handles subgraph naming and version management. + // Uses PanicSubscriptionManager because tests don't need GraphQL subscriptions. + let panicking_subscription_manager = Arc::new(PanicSubscriptionManager {}); + let subgraph_registrar = Arc::new(graph_core::subgraph::SubgraphRegistrar::new( + &logger_factory, + link_resolver.cheap_clone(), + subgraph_provider.cheap_clone(), + subgraph_store.clone(), + panicking_subscription_manager, + Option::>::None, + blockchain_map.clone(), + node_id.clone(), + SubgraphVersionSwitchingMode::Instant, + Arc::new(Settings::default()), + )); + + // Register the subgraph name (e.g., "test/TransferCreatesEntity"). 
+ SubgraphRegistrar::create_subgraph(subgraph_registrar.as_ref(), subgraph_name.clone()).await?; + + // Deploy the subgraph version (loads manifest, compiles WASM, creates schema tables). + let deployment = SubgraphRegistrar::create_subgraph_version( + subgraph_registrar.as_ref(), + subgraph_name.clone(), + hash.clone(), + node_id.clone(), + None, + None, + None, + None, + false, + ) + .await?; + + Ok(TestContext { + logger: logger_factory.subgraph_logger(&deployment), + provider: subgraph_provider, + store: subgraph_store, + deployment, + graphql_runner, + }) +} + +/// Remove a previous subgraph deployment and its data. +/// +/// Called before each test to ensure a clean slate. Errors are ignored +/// (the deployment might not exist on first run). +async fn cleanup( + subgraph_store: &SubgraphStore, + name: &SubgraphName, + hash: &DeploymentHash, +) -> Result<()> { + let locators = SubgraphStoreTrait::locators(subgraph_store, hash).await?; + + match subgraph_store.remove_subgraph(name.clone()).await { + Ok(_) | Err(graph::prelude::StoreError::SubgraphNotFound(_)) => {} + Err(e) => return Err(e.into()), + } + + for locator in locators { + subgraph_store.remove_deployment(locator.id.into()).await?; + } + + Ok(()) +} + +/// Poll the store until the subgraph reaches the target block or fails. +/// +/// Periodically flushes the store's write buffer to speed up block processing +/// (the store batches writes and flush forces them through immediately). +/// +/// Returns `Ok(())` when the subgraph reaches `stop_block`, or `Err(SubgraphError)` +/// if the subgraph fails with a fatal error or times out after 60 seconds. +async fn wait_for_sync( + logger: &Logger, + store: Arc, + deployment: &DeploymentLocator, + stop_block: BlockPtr, +) -> Result<(), SubgraphError> { + const MAX_WAIT: Duration = Duration::from_secs(60); + const WAIT_TIME: Duration = Duration::from_millis(500); + + let start = Instant::now(); + + /// Force the store to flush its write buffer, making pending entity + /// changes visible to queries sooner. + async fn flush(logger: &Logger, store: &Arc, deployment: &DeploymentLocator) { + if let Ok(writable) = store + .clone() + .writable(logger.clone(), deployment.id, Arc::new(vec![])) + .await + { + let _ = writable.flush().await; + } + } + + // Initial flush to ensure any pre-existing writes are visible. + flush(logger, &store, deployment).await; + + while start.elapsed() < MAX_WAIT { + tokio::time::sleep(WAIT_TIME).await; + flush(logger, &store, deployment).await; + + // Check current indexing progress. + let block_ptr = match store.least_block_ptr(&deployment.hash).await { + Ok(Some(ptr)) => ptr, + _ => continue, // Not started yet + }; + + info!(logger, "Sync progress"; "current" => block_ptr.number, "target" => stop_block.number); + + // Check if the subgraph hit a fatal error (e.g., handler panic, deterministic error). + let status = store.status_for_id(deployment.id).await; + if let Some(fatal_error) = status.fatal_error { + return Err(fatal_error); + } + + if block_ptr.number >= stop_block.number { + info!(logger, "Reached stop block"); + return Ok(()); + } + } + + // Timeout — return a synthetic error. 
+ Err(SubgraphError { + subgraph_id: deployment.hash.clone(), + message: format!("Sync timeout after {}s", MAX_WAIT.as_secs()), + block_ptr: None, + handler: None, + deterministic: false, + }) +} From 347bff0d92c6f60314eac03a39eee6fffa339114 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 5 Feb 2026 23:46:52 +0200 Subject: [PATCH 05/34] gnd(test): Extract GraphQL assertion logic to dedicated module Moves assertion execution to gnd/src/commands/test/assertion.rs: - run_assertions: Execute all test assertions - run_single_assertion: Execute and compare a single query - r_value_to_json: Convert graph-node's r::Value to serde_json - json_equal: Compare JSON with string-vs-number coercion Makes TestContext fields pub(super) to allow assertion module access. --- gnd/src/commands/test/assertion.rs | 144 +++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 gnd/src/commands/test/assertion.rs diff --git a/gnd/src/commands/test/assertion.rs b/gnd/src/commands/test/assertion.rs new file mode 100644 index 00000000000..9a0bbd19f6e --- /dev/null +++ b/gnd/src/commands/test/assertion.rs @@ -0,0 +1,144 @@ +//! GraphQL assertion execution for test validation. +//! +//! After all mock blocks have been indexed, this module executes GraphQL +//! queries against the indexed data and compares results to expected values +//! from the test file. + +use super::runner::TestContext; +use super::schema::{Assertion, AssertionFailure, TestResult}; +use anyhow::{anyhow, Result}; +use graph::data::query::{Query, QueryResults, QueryTarget}; +use graph::prelude::{q, r, ApiVersion, GraphQlRunner as GraphQlRunnerTrait}; + +/// Run all GraphQL assertions from the test file. +/// +/// Each assertion is a GraphQL query + expected JSON result. Returns `Passed` +/// if all assertions match, or `Failed` with the list of mismatches. +pub(super) async fn run_assertions( + ctx: &TestContext, + assertions: &[Assertion], +) -> Result { + let mut failures = Vec::new(); + + for assertion in assertions { + match run_single_assertion(ctx, assertion).await { + Ok(None) => {} // Passed + Ok(Some(failure)) => failures.push(failure), + Err(e) => { + // Query execution error — record as a failure with the error message. + failures.push(AssertionFailure { + query: assertion.query.clone(), + expected: assertion.expected.clone(), + actual: serde_json::json!({ "error": e.to_string() }), + }); + } + } + } + + if failures.is_empty() { + Ok(TestResult::Passed) + } else { + Ok(TestResult::Failed { + handler_error: None, + assertion_failures: failures, + }) + } +} + +/// Execute a single GraphQL assertion and compare the result. +/// +/// Returns `None` if the assertion passed (actual == expected), +/// or `Some(AssertionFailure)` with the diff. +async fn run_single_assertion( + ctx: &TestContext, + assertion: &Assertion, +) -> Result> { + // Query targets the specific deployment (not by subgraph name). + let target = QueryTarget::Deployment(ctx.deployment.hash.clone(), ApiVersion::default()); + let query = Query::new( + q::parse_query(&assertion.query) + .map_err(|e| anyhow!("Failed to parse query: {:?}", e))? + .into_static(), + None, + false, + ); + + let query_res: QueryResults = ctx.graphql_runner.clone().run_query(query, target).await; + + let result = query_res + .first() + .ok_or_else(|| anyhow!("No query result"))? + .duplicate() + .to_result() + .map_err(|errors| anyhow!("Query errors: {:?}", errors))?; + + // Convert graph-node's internal r::Value to serde_json::Value for comparison. 
+ let actual_json = match result { + Some(value) => r_value_to_json(&value), + None => serde_json::Value::Null, + }; + + if json_equal(&actual_json, &assertion.expected) { + Ok(None) + } else { + Ok(Some(AssertionFailure { + query: assertion.query.clone(), + expected: assertion.expected.clone(), + actual: actual_json, + })) + } +} + +/// Convert graph-node's internal `r::Value` (GraphQL result) to `serde_json::Value`. +/// +/// Graph-node uses its own value type for GraphQL results. This converts to +/// standard JSON for comparison with the expected values in the test file. +fn r_value_to_json(value: &r::Value) -> serde_json::Value { + match value { + r::Value::Null => serde_json::Value::Null, + r::Value::Boolean(b) => serde_json::Value::Bool(*b), + r::Value::Int(n) => serde_json::Value::Number((*n).into()), + r::Value::Float(f) => serde_json::json!(*f), + r::Value::String(s) => serde_json::Value::String(s.clone()), + r::Value::Enum(s) => serde_json::Value::String(s.clone()), + r::Value::List(list) => { + serde_json::Value::Array(list.iter().map(r_value_to_json).collect()) + } + r::Value::Object(obj) => { + let map: serde_json::Map = obj + .iter() + .map(|(k, v)| (k.to_string(), r_value_to_json(v))) + .collect(); + serde_json::Value::Object(map) + } + r::Value::Timestamp(t) => serde_json::Value::String(t.to_string()), + } +} + +/// Compare two JSON values for equality (ignoring key ordering in objects). +/// +/// Also handles string-vs-number coercion: GraphQL returns `BigInt` and +/// `BigDecimal` fields as JSON strings (e.g., `"1000000000000000000"`), +/// but test authors may write them as JSON numbers. This function treats +/// `String("123")` and `Number(123)` as equal when they represent the +/// same value. +fn json_equal(a: &serde_json::Value, b: &serde_json::Value) -> bool { + match (a, b) { + (serde_json::Value::Null, serde_json::Value::Null) => true, + (serde_json::Value::Bool(a), serde_json::Value::Bool(b)) => a == b, + (serde_json::Value::Number(a), serde_json::Value::Number(b)) => a == b, + (serde_json::Value::String(a), serde_json::Value::String(b)) => a == b, + // String-vs-number coercion for BigInt/BigDecimal fields. + (serde_json::Value::String(s), serde_json::Value::Number(n)) + | (serde_json::Value::Number(n), serde_json::Value::String(s)) => s == &n.to_string(), + (serde_json::Value::Array(a), serde_json::Value::Array(b)) => { + a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| json_equal(a, b)) + } + (serde_json::Value::Object(a), serde_json::Value::Object(b)) => { + a.len() == b.len() + && a.iter() + .all(|(k, v)| b.get(k).map(|bv| json_equal(v, bv)).unwrap_or(false)) + } + _ => false, + } +} From abca0c88bf0e30fae1d80c7ad016d7d88aa4ce0d Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 5 Feb 2026 23:46:24 +0200 Subject: [PATCH 06/34] gnd(test): Extract noop/stub trait implementations to dedicated module Moves unused adapter stubs to gnd/src/commands/test/noop.rs: - StaticBlockRefetcher - NoopRuntimeAdapter / NoopRuntimeAdapterBuilder - NoopAdapterSelector - NoopTriggersAdapter These satisfy Chain constructor trait bounds but are never called during normal test execution since triggers are pre-built and host functions are not available in mocks. 
--- gnd/src/commands/test/block_stream.rs | 182 ++++++++++++++++++++++++++ gnd/src/commands/test/noop.rs | 172 ++++++++++++++++++++++++ 2 files changed, 354 insertions(+) create mode 100644 gnd/src/commands/test/block_stream.rs create mode 100644 gnd/src/commands/test/noop.rs diff --git a/gnd/src/commands/test/block_stream.rs b/gnd/src/commands/test/block_stream.rs new file mode 100644 index 00000000000..68684d36291 --- /dev/null +++ b/gnd/src/commands/test/block_stream.rs @@ -0,0 +1,182 @@ +//! Mock block stream infrastructure for feeding pre-defined test blocks. +//! +//! These types implement graph-node's `BlockStreamBuilder`/`BlockStream` traits +//! to feed pre-defined test blocks instead of connecting to a real RPC endpoint. +//! This is the core mock: everything else (store, WASM runtime, trigger processing) +//! is real graph-node code. + +use async_trait::async_trait; +use graph::blockchain::block_stream::{ + BlockStream, BlockStreamBuilder, BlockStreamError, BlockStreamEvent, BlockWithTriggers, + FirehoseCursor, +}; +use graph::blockchain::{BlockPtr, Blockchain, TriggerFilterWrapper}; +use graph::components::store::{DeploymentLocator, SourceableStore}; +use graph::futures03::Stream; +use graph::prelude::BlockNumber; +use graph_chain_ethereum::Chain; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{Context as TaskContext, Poll}; + +/// Builds block streams that yield pre-defined blocks from test data. +/// +/// Implements `BlockStreamBuilder` so it can be plugged into graph-node's +/// `Chain` constructor. Both `build_firehose` and `build_polling` return the +/// same static stream since we don't care about the transport mechanism. +/// +/// If `current_block` is provided (e.g., after a restart), the stream skips +/// blocks up to and including that pointer to avoid reprocessing. +pub(super) struct StaticStreamBuilder { + pub chain: Vec>, +} + +#[async_trait] +impl BlockStreamBuilder for StaticStreamBuilder { + async fn build_firehose( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _start_blocks: Vec, + current_block: Option, + _filter: Arc<::TriggerFilter>, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let current_idx = current_block.map(|current_block| { + self.chain + .iter() + .enumerate() + .find(|(_, b)| b.ptr() == current_block) + .map(|(i, _)| i) + .unwrap_or(0) + }); + Ok(Box::new(StaticStream::new(self.chain.clone(), current_idx))) + } + + async fn build_polling( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _start_blocks: Vec, + _source_subgraph_stores: Vec>, + current_block: Option, + _filter: Arc>, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let current_idx = current_block.map(|current_block| { + self.chain + .iter() + .enumerate() + .find(|(_, b)| b.ptr() == current_block) + .map(|(i, _)| i) + .unwrap_or(0) + }); + Ok(Box::new(StaticStream::new(self.chain.clone(), current_idx))) + } +} + +/// A `Stream` that synchronously yields pre-defined blocks one at a time. +/// +/// Each `poll_next` call returns the next block immediately (no async waiting). +/// When all blocks have been emitted, returns `None` to signal stream completion, +/// which tells the indexer that sync is done. +struct StaticStream { + blocks: Vec>, + current_idx: usize, +} + +impl StaticStream { + /// Create a new stream, optionally skipping past already-processed blocks. 
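+    /// For example, with three pre-built blocks and `skip_to = Some(0)`,
+    /// the stream yields only the blocks at indices 1 and 2.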
+ /// + /// `skip_to`: If `Some(i)`, start from block `i+1` (block `i` was already processed). + /// If `None`, start from the beginning. + fn new(blocks: Vec>, skip_to: Option) -> Self { + Self { + blocks, + current_idx: skip_to.map(|i| i + 1).unwrap_or(0), + } + } +} + +impl BlockStream for StaticStream { + fn buffer_size_hint(&self) -> usize { + 1 + } +} + +impl Unpin for StaticStream {} + +impl Stream for StaticStream { + type Item = Result, BlockStreamError>; + + fn poll_next(mut self: Pin<&mut Self>, _cx: &mut TaskContext<'_>) -> Poll> { + if self.current_idx >= self.blocks.len() { + return Poll::Ready(None); + } + + let block = self.blocks[self.current_idx].clone(); + let cursor = FirehoseCursor::from(format!("test-cursor-{}", self.current_idx)); + self.current_idx += 1; + + Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor)))) + } +} + +/// Thread-safe wrapper around a `BlockStreamBuilder` to allow dynamic replacement. +/// +/// Graph-node's `Chain` takes an `Arc` at construction time. +/// This wrapper uses a `Mutex` so we could theoretically swap the inner builder +/// (e.g., for re-running with different blocks), though currently only used once. +pub(super) struct MutexBlockStreamBuilder(pub Mutex>>); + +#[async_trait] +impl BlockStreamBuilder for MutexBlockStreamBuilder { + async fn build_firehose( + &self, + chain: &Chain, + deployment: DeploymentLocator, + block_cursor: FirehoseCursor, + start_blocks: Vec, + subgraph_current_block: Option, + filter: Arc<::TriggerFilter>, + unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let builder = self.0.lock().unwrap().clone(); + builder + .build_firehose( + chain, + deployment, + block_cursor, + start_blocks, + subgraph_current_block, + filter, + unified_api_version, + ) + .await + } + + async fn build_polling( + &self, + chain: &Chain, + deployment: DeploymentLocator, + start_blocks: Vec, + source_subgraph_stores: Vec>, + subgraph_current_block: Option, + filter: Arc>, + unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let builder = self.0.lock().unwrap().clone(); + builder + .build_polling( + chain, + deployment, + start_blocks, + source_subgraph_stores, + subgraph_current_block, + filter, + unified_api_version, + ) + .await + } +} diff --git a/gnd/src/commands/test/noop.rs b/gnd/src/commands/test/noop.rs new file mode 100644 index 00000000000..897eff092fd --- /dev/null +++ b/gnd/src/commands/test/noop.rs @@ -0,0 +1,172 @@ +//! Noop/stub trait implementations for the mock `Chain`. +//! +//! These types satisfy the trait bounds required by the `Chain` constructor +//! but are never called during normal test execution because: +//! - Triggers are provided directly via `StaticStreamBuilder` (no scanning needed) +//! - Runtime host functions (eth_call etc.) are not available in mock tests +//! 
(subgraphs that use `ethereum.call()` will fail — this is a known V1 limitation) + +use async_trait::async_trait; +use graph::blockchain::block_stream::{BlockRefetcher, BlockWithTriggers, FirehoseCursor}; +use graph::blockchain::{ + BlockPtr, Blockchain, ChainIdentifier, RuntimeAdapter as RuntimeAdapterTrait, TriggersAdapter, + TriggersAdapterSelector, +}; +use graph::components::store::{DeploymentLocator, EthereumCallCache}; +use graph::data_source::DataSource; +use graph::prelude::{BlockHash, BlockNumber, Error}; +use graph::slog::{Discard, Logger}; +use graph_chain_ethereum::chain::RuntimeAdapterBuilder; +use graph_chain_ethereum::network::EthereumNetworkAdapters; +use graph_chain_ethereum::Chain; +use std::collections::BTreeSet; +use std::marker::PhantomData; +use std::sync::Arc; + +use graph::slog::o; + +// ============ Block Refetcher ============ + +/// Block refetcher that never refetches. +/// +/// In production, block refetching handles reorgs by re-fetching blocks from +/// the chain. In tests, all blocks are pre-defined and there are no reorgs, +/// so this is a noop. `required()` returns false so it's never called. +pub(super) struct StaticBlockRefetcher { + pub _phantom: PhantomData, +} + +#[async_trait] +impl BlockRefetcher for StaticBlockRefetcher { + fn required(&self, _chain: &C) -> bool { + false + } + + async fn get_block( + &self, + _chain: &C, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("StaticBlockRefetcher should never be called") + } +} + +// ============ Runtime Adapters ============ + +/// Returns empty host functions — chain-specific runtime extensions +/// (like eth_call) are not available in mock tests. +struct NoopRuntimeAdapter { + _phantom: PhantomData, +} + +impl RuntimeAdapterTrait for NoopRuntimeAdapter { + fn host_fns(&self, _ds: &DataSource) -> Result, Error> { + Ok(vec![]) + } +} + +/// Builds `NoopRuntimeAdapter` instances for the Chain constructor. +pub(super) struct NoopRuntimeAdapterBuilder; + +impl RuntimeAdapterBuilder for NoopRuntimeAdapterBuilder { + fn build( + &self, + _eth_adapters: Arc, + _call_cache: Arc, + _chain_identifier: Arc, + ) -> Arc> { + Arc::new(NoopRuntimeAdapter { + _phantom: PhantomData, + }) + } +} + +// ============ Triggers Adapters ============ + +/// Always returns `NoopTriggersAdapter` regardless of deployment or capabilities. +pub(super) struct NoopAdapterSelector { + pub _phantom: PhantomData, +} + +impl TriggersAdapterSelector for NoopAdapterSelector { + fn triggers_adapter( + &self, + _loc: &DeploymentLocator, + _capabilities: &::NodeCapabilities, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> Result>, Error> { + Ok(Arc::new(NoopTriggersAdapter { + _phantom: PhantomData, + })) + } +} + +/// A triggers adapter that returns empty/default results for all methods. +/// +/// Since we feed pre-built triggers via `StaticStreamBuilder`, the adapter's +/// scanning and fetching methods are never called during normal test execution. +/// The methods that are called (like `parent_ptr` for chain traversal) return +/// sensible defaults. 
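+/// For example, `triggers_in_block` wraps the given block with an empty
+/// trigger list, and `is_on_main_chain` always reports `true`.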
+struct NoopTriggersAdapter { + _phantom: PhantomData, +} + +#[async_trait] +impl TriggersAdapter for NoopTriggersAdapter { + async fn ancestor_block( + &self, + _ptr: BlockPtr, + _offset: BlockNumber, + _root: Option, + ) -> Result::Block>, Error> { + Ok(None) + } + + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + Ok(vec![]) + } + + async fn chain_head_ptr(&self) -> Result, Error> { + Ok(None) + } + + async fn scan_triggers( + &self, + _from: BlockNumber, + _to: BlockNumber, + _filter: &C::TriggerFilter, + ) -> Result<(Vec>, BlockNumber), Error> { + Ok((vec![], 0)) + } + + async fn triggers_in_block( + &self, + _logger: &Logger, + block: ::Block, + _filter: &::TriggerFilter, + ) -> Result, Error> { + let logger = Logger::root(Discard, o!()); + Ok(BlockWithTriggers::new(block, Vec::new(), &logger)) + } + + async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { + Ok(true) + } + + /// Returns a synthetic parent pointer for chain traversal. + /// Block 0 has no parent; all others point to block N-1 with a default hash. + async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { + match block.number { + 0 => Ok(None), + n => Ok(Some(BlockPtr { + hash: BlockHash::default(), + number: n - 1, + })), + } + } +} From f29050d25f89a1049ff28fb97c40d9dcb2644210 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Wed, 11 Feb 2026 13:07:34 +0200 Subject: [PATCH 07/34] gnd(test): Add eth_call mocking and refactor test output --- gnd/Cargo.toml | 2 +- gnd/src/commands/test/assertion.rs | 141 ++++++++++++-- gnd/src/commands/test/eth_calls.rs | 294 +++++++++++++++++++++++++++++ gnd/src/commands/test/mod.rs | 13 +- gnd/src/commands/test/noop.rs | 45 +---- gnd/src/commands/test/output.rs | 126 ++++++++++--- gnd/src/commands/test/runner.rs | 85 +++++++-- gnd/src/commands/test/schema.rs | 75 +++++++- gnd/src/commands/test/trigger.rs | 2 +- 9 files changed, 673 insertions(+), 110 deletions(-) create mode 100644 gnd/src/commands/test/eth_calls.rs diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index fd60a5ec638..5d6195d67fa 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -56,6 +56,7 @@ thiserror = { workspace = true } # Console output indicatif = "0.18" console = "0.16" +similar = "2" # Code generation graphql-tools = { workspace = true } @@ -85,4 +86,3 @@ pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-arg [dev-dependencies] tempfile = "3" walkdir = "2" -similar = "2" diff --git a/gnd/src/commands/test/assertion.rs b/gnd/src/commands/test/assertion.rs index 9a0bbd19f6e..a24e8c09d4a 100644 --- a/gnd/src/commands/test/assertion.rs +++ b/gnd/src/commands/test/assertion.rs @@ -5,7 +5,7 @@ //! from the test file. 
use super::runner::TestContext; -use super::schema::{Assertion, AssertionFailure, TestResult}; +use super::schema::{Assertion, AssertionFailure, AssertionOutcome, TestResult}; use anyhow::{anyhow, Result}; use graph::data::query::{Query, QueryResults, QueryTarget}; use graph::prelude::{q, r, ApiVersion, GraphQlRunner as GraphQlRunnerTrait}; @@ -18,29 +18,39 @@ pub(super) async fn run_assertions( ctx: &TestContext, assertions: &[Assertion], ) -> Result { - let mut failures = Vec::new(); + let mut outcomes = Vec::new(); + let mut has_failure = false; for assertion in assertions { match run_single_assertion(ctx, assertion).await { - Ok(None) => {} // Passed - Ok(Some(failure)) => failures.push(failure), + Ok(None) => { + outcomes.push(AssertionOutcome::Passed { + query: assertion.query.clone(), + }); + } + Ok(Some(failure)) => { + has_failure = true; + outcomes.push(AssertionOutcome::Failed(failure)); + } Err(e) => { - // Query execution error — record as a failure with the error message. - failures.push(AssertionFailure { + has_failure = true; + outcomes.push(AssertionOutcome::Failed(AssertionFailure { query: assertion.query.clone(), expected: assertion.expected.clone(), actual: serde_json::json!({ "error": e.to_string() }), - }); + })); } } } - if failures.is_empty() { - Ok(TestResult::Passed) - } else { + if has_failure { Ok(TestResult::Failed { handler_error: None, - assertion_failures: failures, + assertions: outcomes, + }) + } else { + Ok(TestResult::Passed { + assertions: outcomes, }) } } @@ -115,6 +125,98 @@ fn r_value_to_json(value: &r::Value) -> serde_json::Value { } } +/// Reorder `actual` arrays to align with `expected`'s element ordering. +/// +/// When a test fails, the raw diff can be misleading if array elements appear +/// in a different order — every line shows as changed even if only one field +/// differs. This function reorders `actual` so that elements are paired with +/// their closest match in `expected`, producing a diff that highlights only +/// real value differences. +pub(super) fn align_for_diff( + expected: &serde_json::Value, + actual: &serde_json::Value, +) -> serde_json::Value { + match (expected, actual) { + (serde_json::Value::Array(exp), serde_json::Value::Array(act)) => { + let mut used = vec![false; act.len()]; + let mut aligned = Vec::with_capacity(exp.len().max(act.len())); + + // For each expected element, find the most similar actual element. + for exp_elem in exp { + let best = act + .iter() + .enumerate() + .filter(|(i, _)| !used[*i]) + .max_by_key(|(_, a)| json_similarity(exp_elem, a)); + + if let Some((idx, _)) = best { + used[idx] = true; + aligned.push(align_for_diff(exp_elem, &act[idx])); + } + } + + // Append any unmatched actual elements at the end. + for (i, elem) in act.iter().enumerate() { + if !used[i] { + aligned.push(elem.clone()); + } + } + + serde_json::Value::Array(aligned) + } + (serde_json::Value::Object(exp), serde_json::Value::Object(act)) => { + // Recurse into matching keys. + let aligned: serde_json::Map = act + .iter() + .map(|(k, v)| { + let aligned_v = if let Some(exp_v) = exp.get(k) { + align_for_diff(exp_v, v) + } else { + v.clone() + }; + (k.clone(), aligned_v) + }) + .collect(); + serde_json::Value::Object(aligned) + } + _ => actual.clone(), + } +} + +/// Score how similar two JSON values are (higher = more similar). +/// +/// For objects, counts matching key-value pairs with heavy weight on `id` +/// (the most common GraphQL entity identifier). Returns 0 for non-matching +/// leaf values. 
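+/// For example, scoring `{"id": "1", "value": "2"}` against
+/// `{"id": "1", "value": "3"}` yields 100 (the `id` pair matches), while
+/// scoring it against `{"id": "2", "value": "2"}` yields only 1 (one
+/// non-`id` pair matches).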
+/// +/// Note: Both this and `json_equal`'s array arm are O(n²). This is fine for +/// realistic test sizes (<1000 entities). If needed, an O(n) fast-path +/// could pre-match elements by `id` field via HashMap before falling back +/// to the similarity scan. +fn json_similarity(a: &serde_json::Value, b: &serde_json::Value) -> usize { + match (a, b) { + (serde_json::Value::Object(a_obj), serde_json::Value::Object(b_obj)) => { + let mut score = 0; + for (k, v) in a_obj { + if let Some(bv) = b_obj.get(k) { + if json_equal(v, bv) { + // `id` match is a strong signal for entity identity. + score += if k == "id" { 100 } else { 1 }; + } + } + } + score + } + _ => { + if json_equal(a, b) { + 1 + } else { + 0 + } + } + } +} + /// Compare two JSON values for equality (ignoring key ordering in objects). /// /// Also handles string-vs-number coercion: GraphQL returns `BigInt` and @@ -132,7 +234,22 @@ fn json_equal(a: &serde_json::Value, b: &serde_json::Value) -> bool { (serde_json::Value::String(s), serde_json::Value::Number(n)) | (serde_json::Value::Number(n), serde_json::Value::String(s)) => s == &n.to_string(), (serde_json::Value::Array(a), serde_json::Value::Array(b)) => { - a.len() == b.len() && a.iter().zip(b.iter()).all(|(a, b)| json_equal(a, b)) + if a.len() != b.len() { + return false; + } + // Order-insensitive comparison: each element in `a` must match + // exactly one unmatched element in `b`. This handles GraphQL + // collection queries where entity ordering is non-deterministic. + let mut used = vec![false; b.len()]; + a.iter().all(|a_elem| { + for (i, b_elem) in b.iter().enumerate() { + if !used[i] && json_equal(a_elem, b_elem) { + used[i] = true; + return true; + } + } + false + }) } (serde_json::Value::Object(a), serde_json::Value::Object(b)) => { a.len() == b.len() diff --git a/gnd/src/commands/test/eth_calls.rs b/gnd/src/commands/test/eth_calls.rs new file mode 100644 index 00000000000..97ca3ad0a5d --- /dev/null +++ b/gnd/src/commands/test/eth_calls.rs @@ -0,0 +1,294 @@ +//! Mock eth_call cache population for `gnd test`. +//! +//! When a subgraph handler executes `ethereum.call()`, graph-node looks up +//! the result in its call cache (PostgreSQL `eth_call_cache` table). By +//! pre-populating this cache with mock responses before indexing starts, +//! tests can control what contract calls return without a real Ethereum node. +//! +//! ## Encoding +//! +//! The cache key is derived from the contract address, the ABI-encoded call +//! data (4-byte selector + encoded parameters), and the block pointer. This +//! module encodes call data using the same `FunctionExt::abi_encode_input()` +//! method that graph-node uses in production (`ethereum_adapter.rs`), ensuring +//! cache IDs match exactly. +//! +//! ## Function signature format +//! +//! Function signatures follow the graph-node convention: +//! ```text +//! functionName(inputTypes):(outputTypes) +//! ``` +//! Examples: +//! - `"balanceOf(address):(uint256)"` +//! - `"getReserves():(uint112,uint112,uint32)"` +//! - `"symbol():(string)"` +//! +//! The colon-separated output syntax is converted internally to alloy's +//! `"returns"` syntax for parsing. 
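+//!
+//! ## Example
+//!
+//! A hypothetical `ethCalls` entry in a test block (field names assume the
+//! serde defaults used by the schema module):
+//!
+//! ```json
+//! {
+//!   "address": "0x0000000000000000000000000000000000000001",
+//!   "function": "balanceOf(address):(uint256)",
+//!   "params": ["0x0000000000000000000000000000000000000002"],
+//!   "returns": ["1000000000000000000"]
+//! }
+//! ```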
+ +use super::schema::{MockEthCall, TestFile}; +use super::trigger::json_to_sol_value; +use anyhow::{anyhow, Context, Result}; +use graph::abi::FunctionExt as GraphFunctionExt; +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::blockchain::BlockPtr; +use graph::components::store::EthereumCallCache; +use graph::data::store::ethereum::call; +use graph::prelude::alloy::dyn_abi::{DynSolType, FunctionExt as AlloyFunctionExt}; +use graph::prelude::alloy::json_abi::Function; +use graph::prelude::alloy::primitives::Address; +use graph::slog::Logger; +use graph_chain_ethereum::Chain; +use graph_store_postgres::ChainStore; +use std::sync::Arc; + +/// Parse a function signature and ABI-encode the call data (selector + params). +/// +/// Uses graph-node's `FunctionExt::abi_encode_input()` — the same encoding path +/// as production `ethereum_adapter.rs:1483-1487` — so the resulting call data +/// produces identical cache IDs. +/// +/// # Arguments +/// * `function_sig` - Function signature, e.g. `"balanceOf(address):(uint256)"` +/// * `params` - JSON values for each input parameter +/// +/// # Returns +/// Encoded call data: 4-byte selector followed by ABI-encoded parameters. +fn encode_function_call(function_sig: &str, params: &[serde_json::Value]) -> Result> { + let alloy_sig = to_alloy_signature(function_sig); + let function = Function::parse(&alloy_sig).map_err(|e| { + anyhow!( + "Failed to parse function signature '{}': {:?}", + function_sig, + e + ) + })?; + + let args: Vec<_> = params + .iter() + .zip(&function.inputs) + .map(|(json, param)| { + let sol_type: DynSolType = param + .ty + .parse() + .map_err(|e| anyhow!("Invalid type '{}': {:?}", param.ty, e))?; + json_to_sol_value(&sol_type, json) + }) + .collect::>>()?; + + GraphFunctionExt::abi_encode_input(&function, &args).context("Failed to encode function call") +} + +/// Parse function outputs from the signature and ABI-encode return values. +/// +/// Uses alloy's `JsonAbiExt::abi_encode_output()` which encodes the return +/// values without a selector prefix (just ABI-encoded parameters), matching +/// what an `eth_call` RPC response would contain. +/// +/// # Arguments +/// * `function_sig` - Function signature, e.g. `"balanceOf(address):(uint256)"` +/// * `returns` - JSON values for each output parameter +/// +/// # Returns +/// ABI-encoded return data (no selector prefix). +fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Result> { + let alloy_sig = to_alloy_signature(function_sig); + let function = Function::parse(&alloy_sig).map_err(|e| { + anyhow!( + "Failed to parse function signature '{}': {:?}", + function_sig, + e + ) + })?; + + let output_values: Vec<_> = returns + .iter() + .zip(&function.outputs) + .map(|(json, param)| { + let sol_type: DynSolType = param + .ty + .parse() + .map_err(|e| anyhow!("Invalid type '{}': {:?}", param.ty, e))?; + json_to_sol_value(&sol_type, json) + }) + .collect::>>()?; + + AlloyFunctionExt::abi_encode_output(&function, &output_values) + .map_err(|e| anyhow!("Failed to encode return value: {}", e)) +} + +/// Convert a graph-node style function signature to alloy's expected format. +/// +/// Graph-node uses `name(inputs):(outputs)` while alloy expects +/// `name(inputs) returns (outputs)`. 
+/// +/// Examples: +/// - `"balanceOf(address):(uint256)"` → `"balanceOf(address) returns (uint256)"` +/// - `"name():(string)"` → `"name() returns (string)"` +/// - `"transfer(address,uint256)"` → `"transfer(address,uint256)"` (no change) +/// - `"balanceOf(address) returns (uint256)"` → unchanged (already alloy format) +fn to_alloy_signature(sig: &str) -> String { + // If it already contains "returns", assume alloy format. + if sig.contains(" returns ") { + return sig.to_string(); + } + + // Look for the "):(" pattern that separates inputs from outputs. + if let Some(pos) = sig.find("):(") { + let inputs = &sig[..=pos]; // "name(inputs)" + let outputs = &sig[pos + 2..]; // "(outputs)" + format!("{} returns {}", inputs, outputs) + } else { + sig.to_string() + } +} + +/// Populate the eth_call cache with mock call responses from test blocks. +/// +/// For each `MockEthCall` in the test file's blocks, this function: +/// 1. Parses the contract address +/// 2. Encodes the function call (selector + params) using the same encoding +/// as production graph-node +/// 3. Creates a `call::Request` matching what the runtime would generate +/// 4. Encodes the return value (or marks as revert) +/// 5. Inserts into the cache via `ChainStore::set_call()` +/// +/// The cache uses BLAKE3 hashing internally to compute cache IDs from the +/// request + block pointer, ensuring our mock entries are found by the same +/// lookup code that production uses. +pub async fn populate_eth_call_cache( + logger: &Logger, + chain_store: Arc, + blocks: &[BlockWithTriggers], + test_file: &TestFile, +) -> Result<()> { + for (block_data, test_block) in blocks.iter().zip(&test_file.blocks) { + let block_ptr = block_data.ptr(); + + for eth_call in &test_block.eth_calls { + populate_single_call(logger, chain_store.clone(), &block_ptr, eth_call).await?; + } + } + Ok(()) +} + +async fn populate_single_call( + logger: &Logger, + chain_store: Arc, + block_ptr: &BlockPtr, + eth_call: &MockEthCall, +) -> Result<()> { + let address: Address = eth_call + .address + .parse() + .with_context(|| format!("Invalid contract address: {}", eth_call.address))?; + + let encoded_call = + encode_function_call(ð_call.function, ð_call.params).with_context(|| { + format!( + "Failed to encode call for {}::{}", + eth_call.address, eth_call.function + ) + })?; + + let request = call::Request::new(address, encoded_call, 0); + + let retval = if eth_call.reverts { + call::Retval::Null + } else { + let encoded_return = encode_return_value(ð_call.function, ð_call.returns) + .with_context(|| { + format!( + "Failed to encode return value for {}::{}", + eth_call.address, eth_call.function + ) + })?; + call::Retval::Value(encoded_return.into()) + }; + + chain_store + .set_call(logger, request, block_ptr.clone(), retval) + .await + .with_context(|| { + format!( + "Failed to cache eth_call for {}::{}", + eth_call.address, eth_call.function + ) + })?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_to_alloy_signature_with_colon() { + assert_eq!( + to_alloy_signature("balanceOf(address):(uint256)"), + "balanceOf(address) returns (uint256)" + ); + } + + #[test] + fn test_to_alloy_signature_multiple_outputs() { + assert_eq!( + to_alloy_signature("getReserves():(uint112,uint112,uint32)"), + "getReserves() returns (uint112,uint112,uint32)" + ); + } + + #[test] + fn test_to_alloy_signature_no_outputs() { + assert_eq!( + to_alloy_signature("transfer(address,uint256)"), + "transfer(address,uint256)" + ); + } + + #[test] + fn 
test_to_alloy_signature_already_alloy_format() { + assert_eq!( + to_alloy_signature("balanceOf(address) returns (uint256)"), + "balanceOf(address) returns (uint256)" + ); + } + + #[test] + fn test_encode_function_call_balanceof() { + let encoded = encode_function_call( + "balanceOf(address):(uint256)", + &[serde_json::json!( + "0x0000000000000000000000000000000000000001" + )], + ) + .unwrap(); + + // First 4 bytes should be the selector for balanceOf(address) + assert_eq!(&encoded[..4], &[0x70, 0xa0, 0x82, 0x31]); + // Total length: 4 (selector) + 32 (address param) = 36 + assert_eq!(encoded.len(), 36); + } + + #[test] + fn test_encode_return_value_uint256() { + let encoded = encode_return_value( + "balanceOf(address):(uint256)", + &[serde_json::json!("1000000000000000000")], + ) + .unwrap(); + + // ABI-encoded uint256 is 32 bytes (no selector) + assert_eq!(encoded.len(), 32); + } + + #[test] + fn test_encode_function_call_no_params() { + let encoded = encode_function_call("symbol():(string)", &[]).unwrap(); + + // Just the 4-byte selector + assert_eq!(encoded.len(), 4); + } +} diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 61065adb310..c0a7fcbbf72 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -40,6 +40,7 @@ mod assertion; mod block_stream; +mod eth_calls; mod mock_chain; mod noop; mod output; @@ -52,8 +53,6 @@ use clap::Parser; use console::style; use std::path::PathBuf; -pub use schema::TestResult; - use crate::output::{step, Step}; #[derive(Clone, Debug, Parser)] @@ -152,6 +151,7 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { let mut passed = 0; let mut failed = 0; + let mut all_failures = Vec::new(); for path in test_files { output::print_test_start(&path); @@ -171,9 +171,11 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { match runner::run_single_test(&opt, &test_file).await { Ok(result) => { output::print_test_result(&test_file.name, &result); - match result { - TestResult::Passed => passed += 1, - TestResult::Failed { .. } => failed += 1, + if result.is_passed() { + passed += 1; + } else { + all_failures.extend(output::collect_failures(&test_file.name, &result)); + failed += 1; } } Err(e) => { @@ -183,6 +185,7 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { } } + output::print_failure_details(&all_failures); output::print_summary(passed, failed); if failed > 0 { diff --git a/gnd/src/commands/test/noop.rs b/gnd/src/commands/test/noop.rs index 897eff092fd..70faa0525b5 100644 --- a/gnd/src/commands/test/noop.rs +++ b/gnd/src/commands/test/noop.rs @@ -3,22 +3,15 @@ //! These types satisfy the trait bounds required by the `Chain` constructor //! but are never called during normal test execution because: //! - Triggers are provided directly via `StaticStreamBuilder` (no scanning needed) -//! - Runtime host functions (eth_call etc.) are not available in mock tests -//! (subgraphs that use `ethereum.call()` will fail — this is a known V1 limitation) +//! - The real `EthereumRuntimeAdapterBuilder` is used for host functions +//! 
(ethereum.call, ethereum.getBalance, ethereum.hasCode), backed by the call cache use async_trait::async_trait; use graph::blockchain::block_stream::{BlockRefetcher, BlockWithTriggers, FirehoseCursor}; -use graph::blockchain::{ - BlockPtr, Blockchain, ChainIdentifier, RuntimeAdapter as RuntimeAdapterTrait, TriggersAdapter, - TriggersAdapterSelector, -}; -use graph::components::store::{DeploymentLocator, EthereumCallCache}; -use graph::data_source::DataSource; +use graph::blockchain::{BlockPtr, Blockchain, TriggersAdapter, TriggersAdapterSelector}; +use graph::components::store::DeploymentLocator; use graph::prelude::{BlockHash, BlockNumber, Error}; use graph::slog::{Discard, Logger}; -use graph_chain_ethereum::chain::RuntimeAdapterBuilder; -use graph_chain_ethereum::network::EthereumNetworkAdapters; -use graph_chain_ethereum::Chain; use std::collections::BTreeSet; use std::marker::PhantomData; use std::sync::Arc; @@ -52,36 +45,6 @@ impl BlockRefetcher for StaticBlockRefetcher { } } -// ============ Runtime Adapters ============ - -/// Returns empty host functions — chain-specific runtime extensions -/// (like eth_call) are not available in mock tests. -struct NoopRuntimeAdapter { - _phantom: PhantomData, -} - -impl RuntimeAdapterTrait for NoopRuntimeAdapter { - fn host_fns(&self, _ds: &DataSource) -> Result, Error> { - Ok(vec![]) - } -} - -/// Builds `NoopRuntimeAdapter` instances for the Chain constructor. -pub(super) struct NoopRuntimeAdapterBuilder; - -impl RuntimeAdapterBuilder for NoopRuntimeAdapterBuilder { - fn build( - &self, - _eth_adapters: Arc, - _call_cache: Arc, - _chain_identifier: Arc, - ) -> Arc> { - Arc::new(NoopRuntimeAdapter { - _phantom: PhantomData, - }) - } -} - // ============ Triggers Adapters ============ /// Always returns `NoopTriggersAdapter` regardless of deployment or capabilities. diff --git a/gnd/src/commands/test/output.rs b/gnd/src/commands/test/output.rs index 2596048702b..82b8f19b8cc 100644 --- a/gnd/src/commands/test/output.rs +++ b/gnd/src/commands/test/output.rs @@ -1,11 +1,13 @@ //! Console output formatting for test results. //! -//! Formats test results with colored pass/fail indicators and detailed -//! assertion failure diffs showing expected vs actual JSON values. +//! Formats test results with colored pass/fail indicators per query and +//! detailed assertion failure diffs collected at the end of the run. use console::style; +use similar::{ChangeTag, TextDiff}; -use super::schema::{AssertionFailure, TestResult}; +use super::assertion::align_for_diff; +use super::schema::{AssertionFailure, AssertionOutcome, TestResult}; use crate::output::{step, Step}; /// Print the header line when starting a test file. @@ -13,40 +15,104 @@ pub fn print_test_start(path: &std::path::Path) { step(Step::Load, &format!("Running {}", path.display())); } -/// Print the result of a single test case (pass or fail with details). +/// Print the result of a single test case with per-query pass/fail indicators. +/// +/// Shows ✔/✘ for the test name, then ✔/✘ for each individual assertion query. +/// Detailed diffs are NOT printed here — they are collected and printed at the end +/// via [`print_failure_details`]. 
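+///
+/// Example output (illustrative):
+/// ```text
+///   ✘ TransferCreatesEntity
+///     ✔ { tokens { id } }
+///     ✘ { transfers(first: 1) { id } }
+/// ```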
pub fn print_test_result(name: &str, result: &TestResult) { - match result { - TestResult::Passed => { - println!(" {} {}", style("✔").green(), name); - } - TestResult::Failed { - handler_error, - assertion_failures, - } => { - println!(" {} {}", style("✘").red(), name); - if let Some(err) = handler_error { - println!(" {} {}", style("Handler error:").red(), err); + if result.is_passed() { + println!(" {} {}", style("✔").green(), name); + } else { + println!(" {} {}", style("✘").red(), name); + } + + if let Some(err) = result.handler_error() { + println!(" {} {}", style("Handler error:").red(), err); + } + + for outcome in result.assertions() { + match outcome { + AssertionOutcome::Passed { query } => { + println!(" {} {}", style("✔").green(), style(query).dim()); } - for failure in assertion_failures { - print_assertion_failure(failure); + AssertionOutcome::Failed(failure) => { + println!(" {} {}", style("✘").red(), failure.query); } } } } -/// Print a detailed assertion failure showing query, expected, and actual values. -fn print_assertion_failure(failure: &AssertionFailure) { - println!(" {} {}", style("Query:").yellow(), failure.query); - println!( - " {} {}", - style("Expected:").green(), - serde_json::to_string_pretty(&failure.expected).unwrap_or_default() - ); - println!( - " {} {}", - style("Actual:").red(), - serde_json::to_string_pretty(&failure.actual).unwrap_or_default() - ); +/// Collected failure info for deferred output. +pub struct FailureDetail { + /// Name of the test that failed. + pub test_name: String, + /// The assertion failure details. + pub failure: AssertionFailure, +} + +/// Collect assertion failures from a test result for deferred display. +pub fn collect_failures(test_name: &str, result: &TestResult) -> Vec { + result + .assertions() + .iter() + .filter_map(|outcome| match outcome { + AssertionOutcome::Passed { .. } => None, + AssertionOutcome::Failed(failure) => Some(FailureDetail { + test_name: test_name.to_string(), + failure: AssertionFailure { + query: failure.query.clone(), + expected: failure.expected.clone(), + actual: failure.actual.clone(), + }, + }), + }) + .collect() +} + +/// Print all collected failure details at the end of the test run. +pub fn print_failure_details(details: &[FailureDetail]) { + if details.is_empty() { + return; + } + + println!(); + println!("{}", style("Failures:").red().bold()); + + for detail in details { + println!(); + println!( + " {} {} {}", + style("●").red(), + style(&detail.test_name).bold(), + style("→").dim(), + ); + println!(" {} {}", style("Query:").yellow(), detail.failure.query); + + let expected = serde_json::to_string_pretty(&detail.failure.expected).unwrap_or_default(); + // Align actual arrays to expected's element ordering so the diff + // highlights real value differences instead of showing every line + // as changed due to non-deterministic GraphQL collection ordering. 
+ let aligned_actual = align_for_diff(&detail.failure.expected, &detail.failure.actual); + let actual = serde_json::to_string_pretty(&aligned_actual).unwrap_or_default(); + + println!( + " {} {} expected {} actual", + style("Diff:").yellow(), + style("(-)").green(), + style("(+)").red(), + ); + + let diff = TextDiff::from_lines(&expected, &actual); + for change in diff.iter_all_changes() { + let text = change.value().trim_end_matches('\n'); + match change.tag() { + ChangeTag::Delete => println!(" {}", style(format!("- {text}")).green()), + ChangeTag::Insert => println!(" {}", style(format!("+ {text}")).red()), + ChangeTag::Equal => println!(" {text}"), + } + } + } } /// Print the final summary line with total pass/fail counts. diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index c99f1316994..378798274cc 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -26,7 +26,7 @@ use super::assertion::run_assertions; use super::block_stream::{MutexBlockStreamBuilder, StaticStreamBuilder}; use super::mock_chain; -use super::noop::{NoopAdapterSelector, NoopRuntimeAdapterBuilder, StaticBlockRefetcher}; +use super::noop::{NoopAdapterSelector, StaticBlockRefetcher}; use super::schema::{TestFile, TestResult}; use super::trigger::build_blocks_with_triggers; use super::TestOpt; @@ -37,7 +37,7 @@ use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier}; use graph::cheap_clone::CheapClone; use graph::components::link_resolver::{ArweaveClient, FileLinkResolver}; use graph::components::metrics::MetricsRegistry; -use graph::components::network_provider::ChainName; +use graph::components::network_provider::{ChainName, ProviderCheckStrategy, ProviderManager}; use graph::components::store::DeploymentLocator; use graph::components::subgraph::{Settings, SubgraphInstanceManager as _}; use graph::data::graphql::load_manager::LoadManager; @@ -51,8 +51,11 @@ use graph::prelude::{ SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }; use graph::slog::{info, o, Drain, Logger, OwnedKVList, Record}; -use graph_chain_ethereum::network::EthereumNetworkAdapters; -use graph_chain_ethereum::Chain; +use graph_chain_ethereum::chain::EthereumRuntimeAdapterBuilder; +use graph_chain_ethereum::network::{EthereumNetworkAdapter, EthereumNetworkAdapters}; +use graph_chain_ethereum::{ + Chain, EthereumAdapter, NodeCapabilities, ProviderEthRpcMetrics, Transport, +}; use graph_core::polling_monitor::{arweave_service, ipfs_service}; use graph_graphql::prelude::GraphQlRunner; use graph_node::config::Config; @@ -171,7 +174,7 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result Result Result Result { - // Minimal graph-node config with one primary shard and one chain. - // The chain provider URL is a dummy — no real RPC calls are made. + // Minimal graph-node config: one primary shard, no chain providers. + // The chain→shard mapping defaults to "primary" in StoreBuilder::make_store, + // and we construct EthereumNetworkAdapters directly in setup_chain. 
let config_str = format!( r#" [store] @@ -365,14 +380,8 @@ indexers = [ "default" ] [chains] ingestor = "default" - -[chains.{}] -shard = "primary" -provider = [ - {{ label = "test", url = "http://localhost:1/", features = [] }} -] "#, - db_url, network_name + db_url ); let config = Config::from_str(&config_str, "default") @@ -458,7 +467,49 @@ async fn setup_chain( let static_block_stream = Arc::new(StaticStreamBuilder { chain: blocks }); let block_stream_builder = Arc::new(MutexBlockStreamBuilder(Mutex::new(static_block_stream))); - let eth_adapters = Arc::new(EthereumNetworkAdapters::empty_for_testing()); + // Create a dummy Ethereum adapter with archive capabilities. + // The adapter itself is never used for RPC — ethereum.call results come from + // the pre-populated call cache. But the RuntimeAdapter needs to resolve an + // adapter with matching capabilities before it can invoke the cache lookup. + let endpoint_metrics = Arc::new(EndpointMetrics::mock()); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + let transport = Transport::new_rpc( + graph::url::Url::parse("http://0.0.0.0:0").unwrap(), + graph::http::HeaderMap::new(), + endpoint_metrics.clone(), + "", + ); + let dummy_adapter = Arc::new( + EthereumAdapter::new( + logger.clone(), + String::new(), + transport, + provider_metrics, + true, + false, + ) + .await, + ); + let adapter = EthereumNetworkAdapter::new( + endpoint_metrics, + NodeCapabilities { + archive: true, + traces: false, + }, + dummy_adapter, + SubgraphLimit::Unlimited, + ); + let provider_manager = ProviderManager::new( + logger.clone(), + vec![(stores.network_name.clone(), vec![adapter])], + ProviderCheckStrategy::MarkAsValid, + ); + let eth_adapters = Arc::new(EthereumNetworkAdapters::new( + stores.network_name.clone(), + provider_manager, + vec![], + None, + )); let chain = Chain::new( logger_factory, @@ -475,7 +526,7 @@ async fn setup_chain( Arc::new(NoopAdapterSelector { _phantom: PhantomData, }), - Arc::new(NoopRuntimeAdapterBuilder), + Arc::new(EthereumRuntimeAdapterBuilder {}), eth_adapters, graph::prelude::ENV_VARS.reorg_threshold(), graph::prelude::ENV_VARS.ingestor_polling_interval, diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index d651c0fa730..7853053c8f8 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -79,6 +79,12 @@ pub struct TestBlock { /// graph-node's trigger ordering (block start -> events by logIndex -> block end). #[serde(default)] pub triggers: Vec, + + /// Mock contract call responses for this specific block. + /// These are pre-cached in the database before the test runs so that + /// `ethereum.call()` invocations in handlers return the mocked values. + #[serde(default, rename = "ethCalls")] + pub eth_calls: Vec, } /// A trigger within a block. The `type` field determines the variant. @@ -140,6 +146,32 @@ pub struct LogTrigger { #[derive(Debug, Clone, Default, Deserialize)] pub struct BlockTrigger {} +/// A mock contract call response that will be pre-cached for a specific block. +/// +/// When a subgraph handler calls `ethereum.call()` during indexing, graph-node +/// looks up the result in its call cache. By pre-populating this cache with +/// mock responses, tests can control what contract calls return without needing +/// a real Ethereum node. +#[derive(Debug, Clone, Deserialize)] +pub struct MockEthCall { + /// Contract address to mock (checksummed or lowercase hex). + pub address: String, + + /// Function signature to mock. 
+    /// Example: `"balanceOf(address):(uint256)"`
+    pub function: String,
+
+    /// Input parameters for the function call.
+    pub params: Vec<String>,
+
+    /// Return values for the function call.
+    pub returns: Vec<String>,
+
+    /// If true, the call will revert instead of returning values.
+    #[serde(default)]
+    pub reverts: bool,
+}
+
 /// A GraphQL assertion to validate indexed entity state.
 #[derive(Debug, Clone, Deserialize)]
 #[allow(dead_code)]
@@ -159,16 +191,53 @@ pub struct Assertion {
 #[derive(Debug)]
 pub enum TestResult {
     /// All assertions passed and no handler errors occurred.
-    Passed,
+    Passed {
+        /// Per-assertion outcomes (all passed).
+        assertions: Vec<AssertionOutcome>,
+    },
     /// The test failed due to handler errors and/or assertion mismatches.
     Failed {
        /// If the subgraph handler threw a fatal error during indexing,
        /// this contains the error message. The test fails immediately
        /// without running assertions.
        handler_error: Option<String>,
-        /// List of assertions where actual != expected.
-        assertion_failures: Vec<AssertionFailure>,
+        /// Per-assertion outcomes (mix of passed and failed).
+        assertions: Vec<AssertionOutcome>,
+    },
+}
+
+impl TestResult {
+    pub fn is_passed(&self) -> bool {
+        matches!(self, TestResult::Passed { .. })
+    }
+
+    pub fn assertions(&self) -> &[AssertionOutcome] {
+        match self {
+            TestResult::Passed { assertions } | TestResult::Failed { assertions, .. } => assertions,
+        }
+    }
+
+    pub fn handler_error(&self) -> Option<&str> {
+        match self {
+            TestResult::Failed {
+                handler_error: Some(e),
+                ..
+            } => Some(e),
+            _ => None,
+        }
+    }
+}
+
+/// Outcome of a single assertion query.
+#[derive(Debug)]
+pub enum AssertionOutcome {
+    /// The assertion passed — actual matched expected.
+    Passed {
+        /// The GraphQL query that was executed.
+        query: String,
+    },
+    /// The assertion failed — actual did not match expected.
+    Failed(AssertionFailure),
 }
 
 /// Details about a single failed assertion.
diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs
index 31cec3fb39b..6fb2c2e6494 100644
--- a/gnd/src/commands/test/trigger.rs
+++ b/gnd/src/commands/test/trigger.rs
@@ -225,7 +225,7 @@ pub fn encode_event_log(
 /// - `bytes`: hex string → dynamic byte array
 /// - `string`: JSON string
 /// - `bytes1`..`bytes32`: hex string → fixed-length byte array (right-zero-padded to 32 bytes)
-fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Result<DynSolValue> {
+pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Result<DynSolValue> {
     match sol_type {
         DynSolType::Address => {
             let s = value

From f2f97b34f114ec04aba627977460f62cf7871d2c Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Thu, 12 Feb 2026 16:17:25 +0200
Subject: [PATCH 08/34] gnd(test): Add EIP-1559 base fee support and refactor block creation

- Add baseFeePerGas field to TestBlock schema
- Parse and apply base fee when creating test blocks
- Replace graph-node helper functions with direct alloy types
- Extract dummy_transaction creation into dedicated function
- Use alloy Block::empty() constructor for cleaner block creation
---
 gnd/src/commands/test/schema.rs  |  5 ++
 gnd/src/commands/test/trigger.rs | 86 ++++++++++++++++++++++++--------
 2 files changed, 69 insertions(+), 22 deletions(-)

diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs
index 7853053c8f8..ac092d2ef60 100644
--- a/gnd/src/commands/test/schema.rs
+++ b/gnd/src/commands/test/schema.rs
@@ -74,6 +74,11 @@ pub struct TestBlock {
     #[serde(default)]
     pub timestamp: Option<u64>,
 
+    /// Base fee per gas (EIP-1559). If omitted, defaults to None (pre-EIP-1559 blocks).
+ /// Specified as a decimal string to handle large values (e.g., "15000000000"). + #[serde(default, rename = "baseFeePerGas")] + pub base_fee_per_gas: Option, + /// Triggers within this block (log events, block events). /// Multiple triggers per block are supported and will be sorted by /// graph-node's trigger ordering (block start -> events by logIndex -> block end). diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs index 6fb2c2e6494..7c4b95335aa 100644 --- a/gnd/src/commands/test/trigger.rs +++ b/gnd/src/commands/test/trigger.rs @@ -67,6 +67,14 @@ pub fn build_blocks_with_triggers( // Default timestamp simulates 12-second block times. let timestamp = test_block.timestamp.unwrap_or(number * 12); + // Parse base fee per gas if provided (EIP-1559 support). + let base_fee_per_gas = test_block + .base_fee_per_gas + .as_ref() + .map(|s| s.parse::()) + .transpose() + .context("Invalid baseFeePerGas value")?; + let mut triggers = Vec::new(); for (log_index, trigger) in test_block.triggers.iter().enumerate() { @@ -86,7 +94,14 @@ pub fn build_blocks_with_triggers( } } - let block = create_block_with_triggers(number, hash, parent_hash, timestamp, triggers)?; + let block = create_block_with_triggers( + number, + hash, + parent_hash, + timestamp, + base_fee_per_gas, + triggers, + )?; blocks.push(block); // Chain to next block. @@ -355,27 +370,50 @@ fn sol_value_to_topic(value: &DynSolValue) -> Result { } } -/// Create a `BlockWithTriggers` from block metadata and triggers. +/// Create a dummy transaction with a specific hash for block transaction lists. /// -/// Constructs a minimal but valid Ethereum block including: -/// - Block header with number, hash, parent_hash -/// - Dummy transactions for each unique tx hash referenced by log triggers -/// (graph-node requires matching transactions in the block body) -/// - The triggers themselves, which get sorted by graph-node's ordering logic +/// Graph-node looks up transactions by hash during log processing, so we need +/// matching dummy transactions in the block body. +fn dummy_transaction( + block_number: u64, + block_hash: B256, + transaction_index: u64, + transaction_hash: B256, +) -> graph::prelude::alloy::rpc::types::Transaction { + use graph::prelude::alloy::consensus::transaction::Recovered; + use graph::prelude::alloy::consensus::{Signed, TxEnvelope, TxLegacy}; + use graph::prelude::alloy::primitives::{Address, Signature, U256}; + use graph::prelude::alloy::rpc::types::Transaction; + + let signed = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::from(1), U256::from(1), false), + transaction_hash, + ); + + Transaction { + inner: Recovered::new_unchecked(TxEnvelope::Legacy(signed), Address::ZERO), + block_hash: Some(block_hash), + block_number: Some(block_number), + transaction_index: Some(transaction_index), + effective_gas_price: None, + } +} + +/// Create a `BlockWithTriggers` from block metadata and triggers. fn create_block_with_triggers( number: u64, hash: B256, parent_hash: B256, - _timestamp: u64, + timestamp: u64, + base_fee_per_gas: Option, triggers: Vec, ) -> Result> { - use graph::prelude::alloy::rpc::types::BlockTransactions; - use graph::prelude::{create_dummy_transaction, create_minimal_block_for_test}; + use graph::prelude::alloy::consensus::Header as ConsensusHeader; + use graph::prelude::alloy::rpc::types::{Block, BlockTransactions, Header}; use std::collections::HashSet; // Collect unique transaction hashes from log triggers. 
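+    // (Deduplicating through a HashSet means logs that share a txHash end up
+    // backed by a single dummy transaction in the block body.)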
- // Graph-node looks up the transaction by hash during log processing, - // so we need corresponding dummy transactions in the block body. let mut tx_hashes: HashSet = HashSet::new(); for trigger in &triggers { if let EthereumTrigger::Log(LogRef::FullLog(log, _)) = trigger { @@ -388,22 +426,26 @@ fn create_block_with_triggers( let transactions: Vec<_> = tx_hashes .into_iter() .enumerate() - .map(|(idx, tx_hash)| create_dummy_transaction(number, hash, Some(idx as u64), tx_hash)) + .map(|(idx, tx_hash)| dummy_transaction(number, hash, idx as u64, tx_hash)) .collect(); - // Build a minimal block with our hash/parent_hash and attach transactions. - let alloy_block = create_minimal_block_for_test(number, hash) - .map_header(|mut header| { - header.inner.parent_hash = parent_hash; - header - }) - .with_transactions(BlockTransactions::Full(transactions)); + let alloy_block = Block::empty(Header { + hash, + inner: ConsensusHeader { + number, + parent_hash, + timestamp, + base_fee_per_gas, + ..Default::default() + }, + total_difficulty: None, + size: None, + }) + .with_transactions(BlockTransactions::Full(transactions)); let light_block = LightEthereumBlock::new(alloy_block.into()); let finality_block = BlockFinality::Final(Arc::new(light_block)); - // BlockWithTriggers::new automatically sorts triggers by graph-node's - // standard ordering (block start → events by logIndex → block end). Ok(BlockWithTriggers::new( finality_block, triggers, From fa2871878e2a6281af096892a668f5e8716b0c98 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 12 Feb 2026 16:17:38 +0200 Subject: [PATCH 09/34] gnd(test): Simplify test schema by auto-injecting block triggers - Rename 'triggers' field to 'events' in TestBlock - Remove TestTrigger enum and BlockTrigger type - Keep LogEvent as the only event type users specify - Auto-inject Start and End block triggers for every block - This ensures block handlers fire correctly without explicit config - Update docs to reflect that block triggers are automatic --- gnd/src/commands/test/schema.rs | 52 ++++++++++---------------------- gnd/src/commands/test/trigger.rs | 39 +++++++++++++----------- 2 files changed, 37 insertions(+), 54 deletions(-) diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index ac092d2ef60..9c8aa4857ea 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -1,8 +1,10 @@ //! JSON schema types for test files and result types. //! //! Test files are JSON documents that describe a sequence of mock blockchain -//! blocks with triggers (log events, block events) and GraphQL assertions to -//! validate the resulting entity state after indexing. +//! blocks with triggers (log events) and GraphQL assertions to validate the +//! resulting entity state after indexing. Block triggers are auto-injected +//! for every block (both `Start` and `End` types) so block handlers with any +//! filter (`once`, `polling`, or none) fire correctly without explicit config. //! //! ## Test file format //! @@ -12,9 +14,8 @@ //! "blocks": [ //! { //! "number": 1, -//! "triggers": [ +//! "events": [ //! { -//! "type": "log", //! "address": "0x1234...", //! "event": "Transfer(address indexed from, address indexed to, uint256 value)", //! "params": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "1000" } @@ -56,7 +57,7 @@ pub struct TestFile { pub assertions: Vec, } -/// A mock blockchain block containing zero or more triggers. +/// A mock blockchain block containing zero or more events. 
 #[derive(Debug, Clone, Deserialize)]
 pub struct TestBlock {
     /// Block number. If omitted, auto-increments starting from `start_block`
@@ -79,11 +80,11 @@ pub struct TestBlock {
     #[serde(default, rename = "baseFeePerGas")]
     pub base_fee_per_gas: Option<String>,
 
-    /// Triggers within this block (log events, block events).
-    /// Multiple triggers per block are supported and will be sorted by
+    /// Log events within this block. Block triggers are auto-injected.
+    /// Multiple events per block are supported and will be sorted by
     /// graph-node's trigger ordering (block start -> events by logIndex -> block end).
     #[serde(default)]
-    pub triggers: Vec<TestTrigger>,
+    pub events: Vec<LogEvent>,
 
     /// Mock contract call responses for this specific block.
     /// These are pre-cached in the database before the test runs so that
@@ -92,33 +93,18 @@ pub struct TestBlock {
     pub eth_calls: Vec<MockEthCall>,
 }
 
-/// A trigger within a block. The `type` field determines the variant.
-///
-/// JSON example for a log trigger:
-/// ```json
-/// { "type": "log", "address": "0x...", "event": "Transfer(...)", "params": {...} }
-/// ```
-///
-/// JSON example for a block trigger:
-/// ```json
-/// { "type": "block" }
-/// ```
-#[derive(Debug, Clone, Deserialize)]
-#[serde(tag = "type", rename_all = "lowercase")]
-pub enum TestTrigger {
-    /// An Ethereum log (event) trigger. This is the most common trigger type.
-    Log(LogTrigger),
-    /// A block-level trigger that fires at the end of block processing.
-    Block(BlockTrigger),
-}
-
-/// A mock Ethereum event log trigger.
+/// A mock Ethereum event log.
 ///
 /// The event signature is parsed and parameters are ABI-encoded into the
 /// proper topics (indexed params) and data (non-indexed params) format
 /// that graph-node expects.
+///
+/// JSON example:
+/// ```json
+/// { "address": "0x...", "event": "Transfer(...)", "params": {...} }
+/// ```
 #[derive(Debug, Clone, Deserialize)]
-pub struct LogTrigger {
+pub struct LogEvent {
     /// Contract address that emitted the event (checksummed or lowercase hex).
     pub address: String,
 
@@ -145,12 +131,6 @@ pub struct LogEvent {
     pub tx_hash: Option<String>,
 }
 
-/// A block-level trigger. Fires as `EthereumBlockTriggerType::End`,
-/// meaning it runs after all event handlers in the block.
-/// No additional fields needed — the block data comes from the parent TestBlock.
-#[derive(Debug, Clone, Default, Deserialize)]
-pub struct BlockTrigger {}
-
 /// A mock contract call response that will be pre-cached for a specific block.
 ///
 /// When a subgraph handler calls `ethereum.call()` during indexing, graph-node
diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs
index 7c4b95335aa..235ce7c0bb5 100644
--- a/gnd/src/commands/test/trigger.rs
+++ b/gnd/src/commands/test/trigger.rs
@@ -20,9 +20,10 @@
 //! - A `LightEthereumBlock` with proper parent hash chaining
 //! - Dummy transactions for each unique tx hash (graph-node requires
 //!   matching transactions in the block for log processing)
-//! - `EthereumTrigger` variants for each trigger in the test JSON
+//! - `EthereumTrigger` variants for each log trigger in the test JSON
+//!
- Auto-injected `Start` and `End` block triggers (so block handlers fire correctly) -use super::schema::{LogTrigger, TestFile, TestTrigger}; +use super::schema::{LogEvent, TestFile}; use anyhow::{anyhow, Context, Result}; use graph::blockchain::block_stream::BlockWithTriggers; use graph::prelude::alloy::dyn_abi::{DynSolType, DynSolValue}; @@ -77,23 +78,25 @@ pub fn build_blocks_with_triggers( let mut triggers = Vec::new(); - for (log_index, trigger) in test_block.triggers.iter().enumerate() { - match trigger { - TestTrigger::Log(log_trigger) => { - let eth_trigger = - build_log_trigger(number, hash, log_index as u64, log_trigger)?; - triggers.push(eth_trigger); - } - TestTrigger::Block(_) => { - // Block triggers fire at block end, after all event handlers. - triggers.push(EthereumTrigger::Block( - BlockPtr::new(hash.into(), number as i32), - EthereumBlockTriggerType::End, - )); - } - } + for (log_index, log_event) in test_block.events.iter().enumerate() { + let eth_trigger = build_log_trigger(number, hash, log_index as u64, log_event)?; + triggers.push(eth_trigger); } + // Auto-inject block triggers for every block so that block handlers + // with any filter fire correctly: + // - Start: matches `once` handlers (at start_block) and initialization handlers + // - End: matches unfiltered and `polling` handlers + let block_ptr = BlockPtr::new(hash.into(), number as i32); + triggers.push(EthereumTrigger::Block( + block_ptr.clone(), + EthereumBlockTriggerType::Start, + )); + triggers.push(EthereumTrigger::Block( + block_ptr, + EthereumBlockTriggerType::End, + )); + let block = create_block_with_triggers( number, hash, @@ -122,7 +125,7 @@ fn build_log_trigger( block_number: u64, block_hash: B256, log_index: u64, - trigger: &LogTrigger, + trigger: &LogEvent, ) -> Result { let address: Address = trigger .address From cfde57ec32fef4a82d51c11bb9359ec2a58c9434 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 12 Feb 2026 16:17:46 +0200 Subject: [PATCH 10/34] gnd(test): Add support for manifests with startBlock > 0 - Extract min startBlock from manifest in extract_start_block_from_manifest() - Use startBlock as default test block numbering base - Create start_block_override to bypass on-chain validation - Pass override through setup_context() to SubgraphRegistrar - This allows testing subgraphs that specify startBlock without needing a real chain --- gnd/src/commands/test/runner.rs | 66 ++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 5 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 378798274cc..43a7a5a5a5a 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -160,6 +160,38 @@ fn extract_network_from_manifest(manifest_path: &Path) -> Result { Ok(network) } +/// Extract the minimum `startBlock` across all data sources in a manifest. +/// +/// When a manifest specifies `startBlock` on its data sources, graph-node +/// normally validates that the block exists on-chain during deployment. +/// In tests there is no real chain, so the caller uses this value to build +/// a `start_block_override` that bypasses validation. +/// +/// Returns 0 if no data source specifies a `startBlock`. 
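+///
+/// For example (hypothetical numbers): data sources starting at blocks 100 and
+/// 50 yield a minimum of 50.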
+fn extract_start_block_from_manifest(manifest_path: &Path) -> Result { + let content = std::fs::read_to_string(manifest_path) + .with_context(|| format!("Failed to read manifest: {}", manifest_path.display()))?; + let manifest: serde_yaml::Value = serde_yaml::from_str(&content) + .with_context(|| format!("Failed to parse manifest: {}", manifest_path.display()))?; + + let min_start_block = manifest + .get("dataSources") + .and_then(|ds| ds.as_sequence()) + .map(|seq| { + seq.iter() + .filter_map(|ds| { + ds.get("source") + .and_then(|s| s.get("startBlock")) + .and_then(|b| b.as_u64()) + }) + .min() + .unwrap_or(0) + }) + .unwrap_or(0); + + Ok(min_start_block) +} + /// Run a single test file end-to-end. /// /// This is the main entry point called from `mod.rs` for each test file. @@ -169,11 +201,8 @@ fn extract_network_from_manifest(manifest_path: &Path) -> Result { /// Returns `TestResult::Passed` if all assertions match, or `TestResult::Failed` /// with details about handler errors or assertion mismatches. pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result { - // Convert test JSON blocks into graph-node's internal block format. - let blocks = build_blocks_with_triggers(test_file, 1)?; - // Empty test with no blocks and no assertions is trivially passing. - if blocks.is_empty() && test_file.assertions.is_empty() { + if test_file.blocks.is_empty() && test_file.assertions.is_empty() { return Ok(TestResult::Passed { assertions: vec![] }); } @@ -205,6 +234,30 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result 0, + // graph-node normally validates the block exists on-chain, but our test + // environment has no real chain. We provide a start_block_override to + // bypass validation, and also default test block numbering to start at + // the manifest's startBlock so blocks land in the indexed range. + let min_start_block = extract_start_block_from_manifest(&built_manifest_path)?; + + // Convert test JSON blocks into graph-node's internal block format. + // Default block numbering starts at the manifest's startBlock so that + // test blocks without explicit numbers fall in the subgraph's indexed range. + let blocks = build_blocks_with_triggers(test_file, min_start_block)?; + + // Build a start_block_override when startBlock > 0 to bypass on-chain + // block validation (which would fail against the dummy firehose endpoint). + // This mirrors what resolve_start_block() computes: a BlockPtr for + // block (min_start_block - 1). + let start_block_override = if min_start_block > 0 { + use graph::prelude::alloy::primitives::keccak256; + let hash = keccak256((min_start_block - 1).to_be_bytes()); + Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32)) + } else { + None + }; + // Create a temporary database for this test. The `_temp_db` handle must // be kept alive for the duration of the test — dropping it destroys the database. let (db_url, _temp_db) = get_database_url(opt, &build_dir)?; @@ -245,6 +298,7 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result, ) -> Result { let env_vars = Arc::new(EnvVars::from_env().unwrap_or_default()); let mock_registry = Arc::new(MetricsRegistry::mock()); @@ -666,13 +721,14 @@ async fn setup_context( SubgraphRegistrar::create_subgraph(subgraph_registrar.as_ref(), subgraph_name.clone()).await?; // Deploy the subgraph version (loads manifest, compiles WASM, creates schema tables). + // start_block_override bypasses on-chain block validation when startBlock > 0. 
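+    // (For example, a manifest startBlock of 100 produces an override pointing
+    // at block 99, with a hash derived deterministically from the block number
+    // in run_single_test above.)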
let deployment = SubgraphRegistrar::create_subgraph_version( subgraph_registrar.as_ref(), subgraph_name.clone(), hash.clone(), node_id.clone(), None, - None, + start_block_override, None, None, false, From f8414737d0088bb5a8c97ecc4b0ab1d933c44181 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Feb 2026 00:01:28 +0200 Subject: [PATCH 11/34] gnd(test): Fix issues after rebase Signed-off-by: Maksim Dimitrov --- Cargo.lock | 4 ++++ gnd/Cargo.toml | 1 - gnd/src/commands/test/runner.rs | 8 ++++---- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 77ecb11e1e8..fb4e71112c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3645,6 +3645,7 @@ version = "0.41.2" dependencies = [ "Inflector", "anyhow", + "async-trait", "clap", "clap_complete", "console 0.16.2", @@ -3654,8 +3655,11 @@ dependencies = [ "graph", "graph-chain-ethereum", "graph-core", + "graph-graphql", "graph-node", + "graph-store-postgres", "graphql-tools", + "hex", "indicatif", "inquire", "lazy_static", diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index 5d6195d67fa..69137b6eb11 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -21,7 +21,6 @@ graph = { path = "../graph" } graph-chain-ethereum = { path = "../chain/ethereum" } graph-core = { path = "../core" } graph-node = { path = "../node" } -graph-chain-ethereum = { path = "../chain/ethereum" } graph-graphql = { path = "../graphql" } graph-store-postgres = { path = "../store/postgres" } diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 43a7a5a5a5a..ad1e0898bad 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -532,6 +532,8 @@ async fn setup_chain( graph::http::HeaderMap::new(), endpoint_metrics.clone(), "", + false, // no_eip2718 + graph_chain_ethereum::Compression::None, ); let dummy_adapter = Arc::new( EthereumAdapter::new( @@ -755,10 +757,8 @@ async fn cleanup( ) -> Result<()> { let locators = SubgraphStoreTrait::locators(subgraph_store, hash).await?; - match subgraph_store.remove_subgraph(name.clone()).await { - Ok(_) | Err(graph::prelude::StoreError::SubgraphNotFound(_)) => {} - Err(e) => return Err(e.into()), - } + // Ignore errors - the subgraph might not exist on first run + let _ = subgraph_store.remove_subgraph(name.clone()).await; for locator in locators { subgraph_store.remove_deployment(locator.id.into()).await?; From c84b6e63d76080b5466c197c2abf9e8c52a9fa05 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Feb 2026 13:00:05 +0200 Subject: [PATCH 12/34] gnd(test): Fix matchstick path Signed-off-by: Maksim Dimitrov --- gnd/src/commands/test/matchstick.rs | 443 ++++++++++++++++++++++++++++ gnd/src/commands/test/mod.rs | 217 +------------- 2 files changed, 448 insertions(+), 212 deletions(-) create mode 100644 gnd/src/commands/test/matchstick.rs diff --git a/gnd/src/commands/test/matchstick.rs b/gnd/src/commands/test/matchstick.rs new file mode 100644 index 00000000000..c8fec7902bd --- /dev/null +++ b/gnd/src/commands/test/matchstick.rs @@ -0,0 +1,443 @@ +//! Backward-compatible Matchstick test runner (legacy mode). +//! +//! Dispatches to Docker mode or binary mode depending on the `--docker` flag. +//! This is the legacy path for projects that haven't migrated to the new +//! JSON-based test format yet. 
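+//!
+//! Invoked from `mod.rs` via `gnd test --matchstick`; pass `-d`/`--docker` to
+//! run the tests in a container instead of the downloaded native binary.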
+ +use anyhow::{anyhow, Context, Result}; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::output::{step, Step}; + +use super::TestOpt; + +const MATCHSTICK_GITHUB_RELEASES: &str = + "https://api.github.com/repos/LimeChain/matchstick/releases/latest"; +const MATCHSTICK_DOWNLOAD_BASE: &str = "https://github.com/LimeChain/matchstick/releases/download"; +const MATCHSTICK_FALLBACK_VERSION: &str = "0.6.0"; +const VERSION_CACHE_TTL_SECS: u64 = 86400; // 24 hours + +/// Cached version info written to `{test_dir}/.latest.json`. +#[derive(Serialize, Deserialize)] +struct VersionCache { + version: String, + timestamp: u64, +} + +/// Entry point for the legacy Matchstick test runner. +/// +/// Dispatches to Docker mode or binary mode depending on the `--docker` flag. +pub(super) async fn run(opt: &TestOpt) -> Result<()> { + if opt.docker { + run_docker_tests(opt).await + } else { + run_binary_tests(opt).await + } +} + +// --------------------------------------------------------------------------- +// Version resolution +// --------------------------------------------------------------------------- + +/// Resolve the Matchstick version to use. +/// +/// Priority: CLI flag → cached `.latest.json` (24h TTL) → GitHub API → fallback. +async fn resolve_matchstick_version( + explicit_version: Option<&str>, + cache_dir: &Path, +) -> Result { + if let Some(v) = explicit_version { + return Ok(v.to_string()); + } + + let cache_path = cache_dir.join(".latest.json"); + + if let Some(cached) = read_version_cache(&cache_path) { + return Ok(cached); + } + + step(Step::Load, "Fetching latest Matchstick version"); + match fetch_latest_version().await { + Ok(version) => { + let _ = write_version_cache(&cache_path, &version); + Ok(version) + } + Err(e) => { + step( + Step::Warn, + &format!( + "Failed to fetch latest version ({}), using {}", + e, MATCHSTICK_FALLBACK_VERSION + ), + ); + Ok(MATCHSTICK_FALLBACK_VERSION.to_string()) + } + } +} + +/// Fetch the latest release tag from the Matchstick GitHub repo. +async fn fetch_latest_version() -> Result { + let client = reqwest::Client::builder().user_agent("gnd-cli").build()?; + + let resp: serde_json::Value = client + .get(MATCHSTICK_GITHUB_RELEASES) + .send() + .await + .context("Failed to reach GitHub API")? + .error_for_status() + .context("GitHub API returned an error")? + .json() + .await + .context("Failed to parse GitHub API response")?; + + resp["tag_name"] + .as_str() + .map(|s| s.to_string()) + .ok_or_else(|| anyhow!("GitHub API response missing tag_name")) +} + +/// Read the cached version from `.latest.json` if it exists and is fresh. +fn read_version_cache(path: &Path) -> Option { + let data = std::fs::read_to_string(path).ok()?; + let cache: VersionCache = serde_json::from_str(&data).ok()?; + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); + + if now.saturating_sub(cache.timestamp) < VERSION_CACHE_TTL_SECS { + Some(cache.version) + } else { + None + } +} + +/// Write a version cache entry to `.latest.json`. +fn write_version_cache(path: &Path, version: &str) -> Result<()> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .context("System clock before UNIX epoch")? 
+ .as_secs(); + + let cache = VersionCache { + version: version.to_string(), + timestamp: now, + }; + + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(path, serde_json::to_string_pretty(&cache)?)?; + Ok(()) +} + +// --------------------------------------------------------------------------- +// Platform detection +// --------------------------------------------------------------------------- + +/// Determine the platform-specific binary name for a given Matchstick version. +/// +/// Mirrors the `getPlatform` logic from graph-tooling's test.ts: +/// - For versions > 0.5.4: simplified platform names (macos-12, linux-22) +/// - For versions <= 0.5.4: legacy platform names with more OS-version granularity +fn get_platform(version: &str) -> Result { + let ver = semver::Version::parse(version) + .with_context(|| format!("Invalid Matchstick version: {version}"))?; + let cutoff = semver::Version::new(0, 5, 4); + + let os = std::env::consts::OS; + let arch = std::env::consts::ARCH; + + if arch != "x86_64" && !(os == "macos" && arch == "aarch64") { + return Err(anyhow!("Unsupported platform: {} {}", os, arch)); + } + + if ver > cutoff { + match os { + "macos" if arch == "aarch64" => Ok("binary-macos-12-m1".to_string()), + "macos" => Ok("binary-macos-12".to_string()), + "linux" => Ok("binary-linux-22".to_string()), + _ => Err(anyhow!("Unsupported OS: {}", os)), + } + } else { + // Legacy platform detection for versions <= 0.5.4 + match os { + "macos" => { + let darwin_major = get_darwin_major_version(); + if matches!(darwin_major, Some(18) | Some(19)) { + Ok("binary-macos-10.15".to_string()) + } else if arch == "aarch64" { + Ok("binary-macos-11-m1".to_string()) + } else { + Ok("binary-macos-11".to_string()) + } + } + "linux" => { + let linux_major = get_linux_major_version(); + match linux_major { + Some(18) => Ok("binary-linux-18".to_string()), + Some(22) | Some(24) => Ok("binary-linux-22".to_string()), + _ => Ok("binary-linux-20".to_string()), + } + } + _ => Err(anyhow!("Unsupported OS: {}", os)), + } + } +} + +/// Parse the major Darwin kernel version from `uname -r` output. +/// +/// Darwin 18.x → macOS 10.14 Mojave, Darwin 19.x → macOS 10.15 Catalina. +fn get_darwin_major_version() -> Option { + let output = std::process::Command::new("uname") + .arg("-r") + .output() + .ok()?; + let release = String::from_utf8_lossy(&output.stdout); + release.trim().split('.').next()?.parse().ok() +} + +/// Parse the major OS version from `/etc/os-release` VERSION_ID field. +fn get_linux_major_version() -> Option { + let content = std::fs::read_to_string("/etc/os-release").ok()?; + for line in content.lines() { + if let Some(val) = line.strip_prefix("VERSION_ID=") { + let val = val.trim_matches('"'); + // Handle "22.04" → 22, or "22" → 22 + return val.split('.').next()?.parse().ok(); + } + } + None +} + +// --------------------------------------------------------------------------- +// Binary download +// --------------------------------------------------------------------------- + +/// Download the Matchstick binary from GitHub releases. +/// +/// The binary is saved to `node_modules/.bin/matchstick-{platform}`. +/// Skips download if the binary already exists, unless `force` is true. 
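+///
+/// For example, version `0.6.0` on x86_64 Linux maps to platform
+/// `binary-linux-22`, so the binary is written to
+/// `node_modules/.bin/matchstick-binary-linux-22`.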
+async fn download_matchstick_binary(version: &str, platform: &str, force: bool) -> Result { + let bin_dir = PathBuf::from("node_modules/.bin"); + let bin_path = bin_dir.join(format!("matchstick-{platform}")); + + if bin_path.exists() && !force { + step( + Step::Done, + &format!("Binary already exists: {}", bin_path.display()), + ); + return Ok(bin_path); + } + + std::fs::create_dir_all(&bin_dir) + .with_context(|| format!("Failed to create directory: {}", bin_dir.display()))?; + + let url = format!("{MATCHSTICK_DOWNLOAD_BASE}/{version}/{platform}"); + step(Step::Load, &format!("Downloading Matchstick {version}")); + + let client = reqwest::Client::builder().user_agent("gnd-cli").build()?; + + let resp = client + .get(&url) + .send() + .await + .with_context(|| format!("Failed to download from {url}"))? + .error_for_status() + .with_context(|| { + format!( + "Download failed for {url}.\n\ + Try Docker mode instead: gnd test --matchstick -d" + ) + })?; + + let bytes = resp + .bytes() + .await + .context("Failed to read download response")?; + + std::fs::write(&bin_path, &bytes) + .with_context(|| format!("Failed to write binary to {}", bin_path.display()))?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions(&bin_path, std::fs::Permissions::from_mode(0o755)) + .with_context(|| format!("Failed to set permissions on {}", bin_path.display()))?; + } + + step(Step::Done, &format!("Downloaded to {}", bin_path.display())); + Ok(bin_path) +} + +// --------------------------------------------------------------------------- +// Binary test runner +// --------------------------------------------------------------------------- + +/// Run Matchstick tests by downloading and executing the native binary. +/// +/// Resolves version → detects platform → downloads binary → spawns process. +async fn run_binary_tests(opt: &TestOpt) -> Result<()> { + step(Step::Generate, "Running Matchstick tests (legacy mode)"); + + let version = + resolve_matchstick_version(opt.matchstick_version.as_deref(), &opt.test_dir).await?; + + let platform = get_platform(&version)?; + let bin_path = download_matchstick_binary(&version, &platform, opt.force).await?; + + let workdir = opt + .manifest + .parent() + .filter(|p| !p.as_os_str().is_empty()) + .unwrap_or(Path::new(".")); + let mut cmd = std::process::Command::new(&bin_path); + cmd.current_dir(workdir); + + if opt.coverage { + cmd.arg("-c"); + } + if opt.recompile { + cmd.arg("-r"); + } + if let Some(datasource) = &opt.datasource { + cmd.arg(datasource); + } + + let status = cmd.status()?; + + if status.success() { + step(Step::Done, "Matchstick tests passed"); + Ok(()) + } else { + Err(anyhow!("Matchstick tests failed")) + } +} + +// --------------------------------------------------------------------------- +// Docker test runner +// --------------------------------------------------------------------------- + +/// Run Matchstick tests inside a Docker container. +/// +/// This is the recommended mode on macOS where the native Matchstick binary +/// has known issues. The Docker image is built automatically if it doesn't +/// exist or if `--force` is specified. +async fn run_docker_tests(opt: &TestOpt) -> Result<()> { + step(Step::Generate, "Running Matchstick tests in Docker"); + + std::process::Command::new("docker") + .arg("--version") + .output() + .context("Docker not found. 
Please install Docker to use -d/--docker mode.")?; + + let mut test_args = String::new(); + if opt.coverage { + test_args.push_str(" -c"); + } + if opt.recompile { + test_args.push_str(" -r"); + } + if let Some(datasource) = &opt.datasource { + test_args.push_str(&format!(" {}", datasource)); + } + + let cwd = std::env::current_dir().context("Failed to get current directory")?; + + let mut cmd = std::process::Command::new("docker"); + cmd.args([ + "run", + "-it", + "--rm", + "--mount", + &format!("type=bind,source={},target=/matchstick", cwd.display()), + ]); + if !test_args.is_empty() { + cmd.args(["-e", &format!("ARGS={}", test_args.trim())]); + } + cmd.arg("matchstick"); + + // Check if the Docker image already exists. + let image_check = std::process::Command::new("docker") + .args(["images", "-q", "matchstick"]) + .output() + .context("Failed to check for Docker image")?; + let image_exists = !image_check.stdout.is_empty(); + + if !image_exists || opt.force { + let version = + resolve_matchstick_version(opt.matchstick_version.as_deref(), &opt.test_dir).await?; + + step(Step::Generate, "Building Matchstick Docker image"); + let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); + if !dockerfile_path.exists() || opt.force { + create_dockerfile(&dockerfile_path, &version)?; + } + let build_status = std::process::Command::new("docker") + .args([ + "build", + "-f", + &dockerfile_path.to_string_lossy(), + "-t", + "matchstick", + ".", + ]) + .status() + .context("Failed to build Docker image")?; + if !build_status.success() { + return Err(anyhow!("Failed to build Matchstick Docker image")); + } + } + + let status = cmd.status().context("Failed to run Docker container")?; + if status.success() { + step(Step::Done, "Tests passed"); + Ok(()) + } else { + Err(anyhow!("Tests failed")) + } +} + +/// Create a Dockerfile for running Matchstick tests in a container. +/// +/// The Dockerfile downloads the Matchstick binary directly from GitHub releases +/// (not npm — `matchstick-as` is the AssemblyScript library, not the runner binary). +/// Based on . +fn create_dockerfile(path: &PathBuf, version: &str) -> Result<()> { + use std::fs; + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let dockerfile_content = format!( + r#"FROM --platform=linux/x86_64 ubuntu:22.04 + +ARG DEBIAN_FRONTEND=noninteractive +ENV ARGS="" + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + curl ca-certificates postgresql postgresql-contrib \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL -o /usr/local/bin/matchstick \ + https://github.com/LimeChain/matchstick/releases/download/{version}/binary-linux-22 \ + && chmod +x /usr/local/bin/matchstick + +RUN mkdir /matchstick +WORKDIR /matchstick + +CMD ["sh", "-c", "matchstick $ARGS"] +"#, + version = version + ); + + fs::write(path, dockerfile_content) + .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; + step(Step::Write, &format!("Created {}", path.display())); + Ok(()) +} diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index c0a7fcbbf72..06fbaf62a2f 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -36,11 +36,13 @@ //! - [`assertion`]: GraphQL assertion execution and JSON comparison //! - [`block_stream`]: Mock block stream that feeds pre-built blocks //! 
- [`noop`]: Noop/stub trait implementations for the mock chain +//! - [`matchstick`]: Legacy Matchstick test runner (version resolution, download, Docker) //! - [`output`]: Console output formatting for test results mod assertion; mod block_stream; mod eth_calls; +mod matchstick; mod mock_chain; mod noop; mod output; @@ -48,7 +50,7 @@ mod runner; mod schema; mod trigger; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, Result}; use clap::Parser; use console::style; use std::path::PathBuf; @@ -94,7 +96,7 @@ pub struct TestOpt { #[clap(short = 'f', long, requires = "matchstick")] pub force: bool, - /// Matchstick version to use (default: 0.6.0) + /// Matchstick version to use (default: latest from GitHub) #[clap(long, requires = "matchstick")] pub matchstick_version: Option, @@ -110,7 +112,7 @@ pub struct TestOpt { /// Returns an error if any tests fail (for non-zero exit code). pub async fn run_test(opt: TestOpt) -> Result<()> { if opt.matchstick { - return run_matchstick_tests(&opt); + return matchstick::run(&opt).await; } // Build the subgraph first so the WASM and schema are available in build/. @@ -194,212 +196,3 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { Ok(()) } } - -/// Backward-compatible Matchstick test runner. -/// -/// Dispatches to Docker mode or binary mode depending on the `--docker` flag. -/// This is the legacy path for projects that haven't migrated to the new -/// JSON-based test format yet. -fn run_matchstick_tests(opt: &TestOpt) -> Result<()> { - if opt.docker { - run_docker_tests(opt) - } else { - run_binary_tests(opt) - } -} - -/// Run Matchstick tests using a locally installed binary. -/// -/// Searches for the Matchstick binary in well-known locations and executes it, -/// passing through any relevant CLI flags. -fn run_binary_tests(opt: &TestOpt) -> Result<()> { - step(Step::Generate, "Running Matchstick tests (legacy mode)"); - - let path = find_matchstick().ok_or_else(|| { - anyhow!( - "Matchstick not found. Please install it with:\n \ - npm install --save-dev matchstick-as\n\n\ - Or use Docker mode:\n \ - gnd test --matchstick -d" - ) - })?; - - let workdir = opt.manifest.parent().unwrap_or(std::path::Path::new(".")); - let mut cmd = std::process::Command::new(&path); - cmd.current_dir(workdir); - - if opt.coverage { - cmd.arg("-c"); - } - if opt.recompile { - cmd.arg("-r"); - } - if let Some(datasource) = &opt.datasource { - cmd.arg(datasource); - } - - let status = cmd - .status() - .with_context(|| format!("Failed to execute Matchstick binary: {}", path))?; - - if status.success() { - step(Step::Done, "Matchstick tests passed"); - Ok(()) - } else { - Err(anyhow!("Matchstick tests failed")) - } -} - -/// Find the Matchstick binary by searching well-known locations and PATH. -/// -/// Search order: -/// 1. `node_modules/.bin/graph-test` -/// 2. `node_modules/.bin/matchstick` -/// 3. `node_modules/matchstick-as/bin/matchstick` -/// 4. `graph-test` on PATH -/// 5. `matchstick` on PATH -fn find_matchstick() -> Option { - let local_paths = [ - "node_modules/.bin/graph-test", - "node_modules/.bin/matchstick", - "node_modules/matchstick-as/bin/matchstick", - ]; - - local_paths - .iter() - .find(|p| std::path::Path::new(p).exists()) - .map(|p| p.to_string()) - .or_else(|| { - which::which("graph-test") - .ok() - .map(|p| p.to_string_lossy().into_owned()) - }) - .or_else(|| { - which::which("matchstick") - .ok() - .map(|p| p.to_string_lossy().into_owned()) - }) -} - -/// Run Matchstick tests inside a Docker container. 
-/// -/// This is the recommended mode on macOS where the native Matchstick binary -/// has known issues. The Docker image is built automatically if it doesn't -/// exist or if `--force` is specified. -fn run_docker_tests(opt: &TestOpt) -> Result<()> { - step(Step::Generate, "Running Matchstick tests in Docker"); - - std::process::Command::new("docker") - .arg("--version") - .output() - .context("Docker not found. Please install Docker to use -d/--docker mode.")?; - - let mut test_args = String::new(); - if opt.coverage { - test_args.push_str(" -c"); - } - if opt.recompile { - test_args.push_str(" -r"); - } - if let Some(datasource) = &opt.datasource { - test_args.push_str(&format!(" {}", datasource)); - } - - let cwd = std::env::current_dir().context("Failed to get current directory")?; - - let mut cmd = std::process::Command::new("docker"); - cmd.args([ - "run", - "-it", - "--rm", - "--mount", - &format!("type=bind,source={},target=/matchstick", cwd.display()), - ]); - if !test_args.is_empty() { - cmd.args(["-e", &format!("ARGS={}", test_args.trim())]); - } - cmd.arg("matchstick"); - - // Check if the Docker image already exists. - let image_check = std::process::Command::new("docker") - .args(["images", "-q", "matchstick"]) - .output() - .context("Failed to check for Docker image")?; - let image_exists = !image_check.stdout.is_empty(); - - if !image_exists || opt.force { - step(Step::Generate, "Building Matchstick Docker image"); - let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); - if !dockerfile_path.exists() || opt.force { - create_dockerfile(&dockerfile_path, opt.matchstick_version.as_deref())?; - } - let build_status = std::process::Command::new("docker") - .args([ - "build", - "-f", - &dockerfile_path.to_string_lossy(), - "-t", - "matchstick", - ".", - ]) - .status() - .context("Failed to build Docker image")?; - if !build_status.success() { - return Err(anyhow!("Failed to build Matchstick Docker image")); - } - } - - let status = cmd.status().context("Failed to run Docker container")?; - if status.success() { - step(Step::Done, "Tests passed"); - Ok(()) - } else { - Err(anyhow!("Tests failed")) - } -} - -/// Create a Dockerfile for running Matchstick tests in a container. -/// -/// The Dockerfile downloads the Matchstick binary directly from GitHub releases -/// (not npm — `matchstick-as` is the AssemblyScript library, not the runner binary). -/// Based on . 
-fn create_dockerfile(path: &PathBuf, version: Option<&str>) -> Result<()> { - use std::fs; - - if let Some(parent) = path.parent() { - fs::create_dir_all(parent)?; - } - - let version = version.unwrap_or("0.6.0"); - let dockerfile_content = format!( - r#"FROM --platform=linux/x86_64 ubuntu:22.04 - -ARG DEBIAN_FRONTEND=noninteractive -ENV ARGS="" - -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - curl ca-certificates postgresql postgresql-contrib \ - && rm -rf /var/lib/apt/lists/* - -RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \ - && apt-get install -y --no-install-recommends nodejs \ - && rm -rf /var/lib/apt/lists/* - -RUN curl -fsSL -o /usr/local/bin/matchstick \ - https://github.com/LimeChain/matchstick/releases/download/{version}/binary-linux-22 \ - && chmod +x /usr/local/bin/matchstick - -RUN mkdir /matchstick -WORKDIR /matchstick - -CMD ["sh", "-c", "matchstick $ARGS"] -"#, - version = version - ); - - fs::write(path, dockerfile_content) - .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; - step(Step::Write, &format!("Created {}", path.display())); - Ok(()) -} From 944f981f33858a0c672768ce18c1ce940b64d75e Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Feb 2026 15:48:23 +0200 Subject: [PATCH 13/34] gnd(test): Reuse load_manifest() Signed-off-by: Maksim Dimitrov --- gnd/src/commands/test/runner.rs | 58 +++++++++++-------------------- gnd/src/commands/test/schema.rs | 4 ++- gnd/src/manifest.rs | 12 +++++++ graph/src/data_source/subgraph.rs | 4 +++ 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index ad1e0898bad..b10e1fef73b 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -30,6 +30,7 @@ use super::noop::{NoopAdapterSelector, StaticBlockRefetcher}; use super::schema::{TestFile, TestResult}; use super::trigger::build_blocks_with_triggers; use super::TestOpt; +use crate::manifest::{load_manifest, Manifest}; use anyhow::{anyhow, Context, Result}; use graph::amp::FlightClient; use graph::blockchain::block_stream::BlockWithTriggers; @@ -142,54 +143,34 @@ pub(super) struct TestContext { /// The network name must match the chain configuration passed to the store, /// otherwise graph-node won't route triggers to the correct chain. /// Falls back to "mainnet" if not found (the common case for Ethereum subgraphs). -fn extract_network_from_manifest(manifest_path: &Path) -> Result { - let content = std::fs::read_to_string(manifest_path) - .with_context(|| format!("Failed to read manifest: {}", manifest_path.display()))?; - let manifest: serde_yaml::Value = serde_yaml::from_str(&content) - .with_context(|| format!("Failed to parse manifest: {}", manifest_path.display()))?; - +fn extract_network_from_manifest(manifest: &Manifest) -> Result { let network = manifest - .get("dataSources") - .and_then(|ds| ds.as_sequence()) - .and_then(|seq| seq.first()) - .and_then(|first| first.get("network")) - .and_then(|n| n.as_str()) - .map(|s| s.to_string()) + .data_sources + .first() + .and_then(|ds| ds.network.clone()) .unwrap_or_else(|| "mainnet".to_string()); Ok(network) } -/// Extract the minimum `startBlock` across all data sources in a manifest. +/// Extract the minimum `startBlock` across all Ethereum data sources in a manifest. /// /// When a manifest specifies `startBlock` on its data sources, graph-node /// normally validates that the block exists on-chain during deployment. 
 /// In tests there is no real chain, so the caller uses this value to build
 /// a `start_block_override` that bypasses validation.
 ///
-/// Returns 0 if no data source specifies a `startBlock`.
-fn extract_start_block_from_manifest(manifest_path: &Path) -> Result<u64> {
-    let content = std::fs::read_to_string(manifest_path)
-        .with_context(|| format!("Failed to read manifest: {}", manifest_path.display()))?;
-    let manifest: serde_yaml::Value = serde_yaml::from_str(&content)
-        .with_context(|| format!("Failed to parse manifest: {}", manifest_path.display()))?;
-
-    let min_start_block = manifest
-        .get("dataSources")
-        .and_then(|ds| ds.as_sequence())
-        .map(|seq| {
-            seq.iter()
-                .filter_map(|ds| {
-                    ds.get("source")
-                        .and_then(|s| s.get("startBlock"))
-                        .and_then(|b| b.as_u64())
-                })
-                .min()
-                .unwrap_or(0)
-        })
-        .unwrap_or(0);
-
-    Ok(min_start_block)
+/// gnd test only supports testing Ethereum contracts; the minimum is taken
+/// over every data source in the manifest, with non-Ethereum kinds reporting
+/// the `startBlock` defaults assigned in `convert_data_source` (for example,
+/// 0 for offchain sources).
+///
+/// Returns 0 if no data source specifies a `startBlock`.
+fn extract_start_block_from_manifest(manifest: &Manifest) -> Result<u64> {
+    Ok(manifest
+        .data_sources
+        .iter()
+        .map(|ds| ds.start_block)
+        .min()
+        .unwrap_or(0))
 }
 
 /// Run a single test file end-to-end.
@@ -230,16 +211,17 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result<TestResult> {
     // When the manifest has a startBlock > 0,
     // graph-node normally validates the block exists on-chain, but our test
     // environment has no real chain. We provide a start_block_override to
     // bypass validation, and also default test block numbering to start at
     // the manifest's startBlock so blocks land in the indexed range.
-    let min_start_block = extract_start_block_from_manifest(&built_manifest_path)?;
+    let min_start_block = extract_start_block_from_manifest(&manifest)?;
 
     // Convert test JSON blocks into graph-node's internal block format.
     // Default block numbering starts at the manifest's startBlock so that
diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs
index 9c8aa4857ea..37122ead868 100644
--- a/gnd/src/commands/test/schema.rs
+++ b/gnd/src/commands/test/schema.rs
@@ -263,7 +263,9 @@ pub fn discover_test_files(dir: &Path) -> anyhow::Result<Vec<PathBuf>> {
 
         if path.is_file() {
             if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
-                if name.ends_with(".test.json") || name.ends_with(".json") {
+                if (name.ends_with(".test.json") || name.ends_with(".json"))
+                    && !name.starts_with('.')
+                {
                     files.push(path);
                 }
             }
diff --git a/gnd/src/manifest.rs b/gnd/src/manifest.rs
index 5f37df3418f..947eff5fd12 100644
--- a/gnd/src/manifest.rs
+++ b/gnd/src/manifest.rs
@@ -68,6 +68,10 @@ pub struct DataSource {
     pub source_address: Option<String>,
     /// The ABI name referenced in `source.abi` (Ethereum data sources only).
     pub source_abi: Option<String>,
+    /// The block number at which this data source starts indexing (from source.startBlock).
+    pub start_block: u64,
+    /// The block number at which this data source stops indexing (from source.endBlock).
+    pub end_block: Option<u64>,
     /// Event handlers from the mapping.
     pub event_handlers: Vec<EventHandler>,
     /// Call handlers from the mapping.
@@ -228,6 +232,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { .collect(), source_address: eth.source.address.map(|a| format!("{:?}", a)), source_abi: Some(eth.source.abi.clone()), + start_block: eth.source.start_block as u64, + end_block: eth.source.end_block.map(|b| b as u64), event_handlers: eth .mapping .event_handlers @@ -277,6 +283,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { .collect(), source_address: Some(sub.source.address().to_string()), source_abi: None, + start_block: sub.source.start_block() as u64, + end_block: None, // Subgraph sources don't have end_block event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], @@ -290,6 +298,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { abis: vec![], source_address: None, source_abi: None, + start_block: 0, // Offchain data sources don't have start_block + end_block: None, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], @@ -303,6 +313,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { abis: vec![], source_address: None, source_abi: None, + start_block: amp.source.start_block.unwrap_or(0), + end_block: amp.source.end_block, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], diff --git a/graph/src/data_source/subgraph.rs b/graph/src/data_source/subgraph.rs index 0207aee4df3..454f20ec29e 100644 --- a/graph/src/data_source/subgraph.rs +++ b/graph/src/data_source/subgraph.rs @@ -237,6 +237,10 @@ impl UnresolvedSource { pub fn address(&self) -> &DeploymentHash { &self.address } + + pub fn start_block(&self) -> BlockNumber { + self.start_block + } } #[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize)] From ca697b847685ce57968ef26836fddacf2dea913e Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Feb 2026 16:32:12 +0200 Subject: [PATCH 14/34] gnd(test): Add README, add important inline notes Signed-off-by: Maksim Dimitrov --- gnd/src/commands/test/README.md | 776 +++++++++++++++++++++++++++++ gnd/src/commands/test/assertion.rs | 6 + gnd/src/commands/test/runner.rs | 12 + gnd/src/commands/test/schema.rs | 4 + gnd/src/commands/test/trigger.rs | 3 +- 5 files changed, 800 insertions(+), 1 deletion(-) create mode 100644 gnd/src/commands/test/README.md diff --git a/gnd/src/commands/test/README.md b/gnd/src/commands/test/README.md new file mode 100644 index 00000000000..57f74969d37 --- /dev/null +++ b/gnd/src/commands/test/README.md @@ -0,0 +1,776 @@ +# gnd test + +Mock-based subgraph test runner that feeds JSON-defined blocks through real graph-node infrastructure (store, WASM runtime, trigger processing) with only the blockchain layer mocked. + +## Quick Start + +```bash +# Run all tests in tests/ directory +gnd test + +# Run a specific test file +gnd test tests/transfer.json + +# Skip automatic build (if subgraph already built) +gnd test --skip-build + +# Use legacy Matchstick runner +gnd test --matchstick +``` + +## Test File Format + +Tests are JSON files that define: +- Mock blockchain blocks with events +- Mock `eth_call` responses +- GraphQL assertions to validate entity state + +Place test files in a `tests/` directory with `.json` or `.test.json` extension. 
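+
+For example (hypothetical file names):
+
+```
+tests/
+├── transfer.test.json
+└── block-handlers.json
+```
+
+Files whose names start with a dot (such as the runner's `.latest.json`
+version cache) are skipped during test discovery.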
+
+### Basic Example
+
+```json
+{
+  "name": "Transfer creates entity",
+  "blocks": [
+    {
+      "number": 1,
+      "timestamp": 1672531200,
+      "events": [
+        {
+          "address": "0x1234...",
+          "event": "Transfer(address indexed from, address indexed to, uint256 value)",
+          "params": {
+            "from": "0xaaaa...",
+            "to": "0xbbbb...",
+            "value": "1000"
+          }
+        }
+      ],
+      "ethCalls": [
+        {
+          "address": "0x1234...",
+          "function": "balanceOf(address)(uint256)",
+          "params": ["0xaaaa..."],
+          "returns": ["1000000000000000000"]
+        }
+      ]
+    }
+  ],
+  "assertions": [
+    {
+      "query": "{ transfer(id: \"1\") { from to value } }",
+      "expected": {
+        "transfer": {
+          "from": "0xaaaa...",
+          "to": "0xbbbb...",
+          "value": "1000"
+        }
+      }
+    }
+  ]
+}
+```
+
+## Block Fields
+
+| Field | Required | Default | Description |
+|-------|----------|---------|-------------|
+| `number` | No | Auto-increments from the lowest `startBlock` defined in the manifest, or from `0` if none is defined | Block number |
+| `hash` | No | `keccak256(block_number)` | Block hash |
+| `timestamp` | No | `block_number * 12` | Unix timestamp |
+| `baseFeePerGas` | No | None (pre-EIP-1559) | Base fee in wei |
+| `events` | No | Empty array | Log events in this block |
+| `ethCalls` | No | Empty array | Mock `eth_call` responses |
+
+### Empty Blocks
+
+Empty blocks (no events) still trigger block handlers:
+
+```json
+{
+  "name": "Test block handlers",
+  "blocks": [
+    {
+      "number": 1,
+      "events": [...]
+    },
+    {} // Block 2 with no events - block handlers still fire
+  ]
+}
+```
+
+## Event Fields
+
+| Field | Required | Default | Description |
+|-------|----------|---------|-------------|
+| `address` | Yes | — | Contract address (lowercase hex with 0x prefix) |
+| `event` | Yes | — | Full event signature with `indexed` keywords |
+| `params` | No | Empty object | Event parameter values |
+| `txHash` | No | `keccak256(block_number \|\| log_index)` | Transaction hash |
+
+### Event Signature Format
+
+**Important:** Include `indexed` keywords in the signature:
+
+```json
+{
+  "event": "Transfer(address indexed from, address indexed to, uint256 value)"
+}
+```
+
+Not:
+```json
+{
+  "event": "Transfer(address,address,uint256)" // ❌ Missing indexed keywords
+}
+```
+
+### Parameter Types
+
+Event parameters are automatically ABI-encoded based on the signature. Supported formats:
+
+```json
+{
+  "params": {
+    "from": "0xaaaa...",   // address
+    "to": "0xbbbb...",     // address
+    "value": "1000",       // uint256 (string or number)
+    "amount": 1000,        // uint256 (number)
+    "enabled": true,       // bool
+    "data": "0x1234...",   // bytes
+    "name": "Token"        // string
+  }
+}
+```
+
+## Block Handlers
+
+Block handlers are **automatically triggered** for every block. You don't need to specify block triggers in the JSON.
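+
+Internally, the runner injects two trigger variants per mock block, which is what makes every filter kind work. A simplified sketch of that step (adapted from `trigger.rs`; surrounding loop, imports, and error handling elided):
+
+```rust
+// For each mock block, push both a Start and an End block trigger so that
+// `once`, unfiltered, and `polling` block handlers can all match.
+let block_ptr = BlockPtr::new(hash.into(), number as i32);
+triggers.push(EthereumTrigger::Block(
+    block_ptr.clone(),
+    EthereumBlockTriggerType::Start, // matches `once` handlers at startBlock
+));
+triggers.push(EthereumTrigger::Block(
+    block_ptr,
+    EthereumBlockTriggerType::End, // matches unfiltered and `polling` handlers
+));
+```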
+ +### How Block Handlers Work + +The test runner auto-injects both `Start` and `End` block triggers for each block, ensuring all block handler filters work correctly: + +- **`once` filter** → Fires once at `startBlock` (via `Start` trigger) +- **No filter** → Fires on every block (via `End` trigger) +- **`polling` filter** → Fires every N blocks based on formula: `(block_number - startBlock) % every == 0` + +### Example: Basic Block Handlers + +```json +{ + "name": "Block handlers test", + "blocks": [ + {}, // Block 0 - both 'once' and regular block handlers fire + {} // Block 1 - only regular block handlers fire + ], + "assertions": [ + { + "query": "{ blocks { number } }", + "expected": { + "blocks": [ + {"number": "0"}, + {"number": "1"} + ] + } + }, + { + "query": "{ blockOnces { msg } }", + "expected": { + "blockOnces": [ + {"msg": "This fires only once at block 0"} + ] + } + } + ] +} +``` + +### Polling Block Handlers + +Polling handlers fire at regular intervals specified by the `every` parameter. The handler fires when: + +``` +(block_number - startBlock) % every == 0 +``` + +**Manifest example:** +```yaml +blockHandlers: + - handler: handleEveryThreeBlocks + filter: + kind: polling + every: 3 +``` + +**Test example (startBlock: 0):** +```json +{ + "name": "Polling handler test", + "blocks": [ + {}, // Block 0 - handler fires (0 % 3 == 0) + {}, // Block 1 - handler doesn't fire + {}, // Block 2 - handler doesn't fire + {}, // Block 3 - handler fires (3 % 3 == 0) + {}, // Block 4 - handler doesn't fire + {}, // Block 5 - handler doesn't fire + {} // Block 6 - handler fires (6 % 3 == 0) + ], + "assertions": [ + { + "query": "{ pollingBlocks(orderBy: number) { number } }", + "expected": { + "pollingBlocks": [ + {"number": "0"}, + {"number": "3"}, + {"number": "6"} + ] + } + } + ] +} +``` + +**With non-zero startBlock:** + +When your data source has `startBlock > 0`, the polling interval is calculated from that starting point. + +**Manifest:** +```yaml +dataSources: + - name: Token + source: + startBlock: 100 + mapping: + blockHandlers: + - handler: handlePolling + filter: + kind: polling + every: 5 +``` + +**Test:** +```json +{ + "name": "Polling from block 100", + "blocks": [ + {"number": 100}, // Fires: (100-100) % 5 == 0 + {"number": 101}, // Doesn't fire + {"number": 102}, // Doesn't fire + {"number": 103}, // Doesn't fire + {"number": 104}, // Doesn't fire + {"number": 105}, // Fires: (105-100) % 5 == 0 + {"number": 106}, // Doesn't fire + {"number": 107}, // Doesn't fire + {"number": 108}, // Doesn't fire + {"number": 109}, // Doesn't fire + {"number": 110} // Fires: (110-100) % 5 == 0 + ], + "assertions": [ + { + "query": "{ pollingBlocks(orderBy: number) { number } }", + "expected": { + "pollingBlocks": [ + {"number": "100"}, + {"number": "105"}, + {"number": "110"} + ] + } + } + ] +} +``` + +**Note:** The test runner automatically handles `startBlock > 0`, so blocks default to numbering from the manifest's `startBlock`. 
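+
+The firing rule is small enough to state directly in code. A self-contained sketch of the predicate (`polling_handler_fires` is an illustrative helper, not a graph-node API; assumes `every > 0`):
+
+```rust
+/// Whether a polling block handler with interval `every` fires at `block_number`.
+/// Mirrors the formula above: (block_number - startBlock) % every == 0.
+fn polling_handler_fires(block_number: u64, start_block: u64, every: u64) -> bool {
+    block_number >= start_block && (block_number - start_block) % every == 0
+}
+
+fn main() {
+    // startBlock = 100, every = 5: fires at 100, 105, 110, ...
+    assert!(polling_handler_fires(105, 100, 5));
+    assert!(!polling_handler_fires(106, 100, 5));
+}
+```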
+ +## eth_call Mocking + +Mock contract calls made from mapping handlers using `contract.call()`: + +```json +{ + "ethCalls": [ + { + "address": "0x1234...", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa..."], + "returns": ["1000000000000000000"] + } + ] +} +``` + +### ethCall Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `address` | Yes | Contract address | +| `function` | Yes | Full signature: `"functionName(inputTypes)(returnTypes)"` | +| `params` | Yes | Array of input parameters (as strings) | +| `returns` | Yes | Array of return values (as strings, ignored if `reverts: true`) | +| `reverts` | No | Default `false`. If `true`, the call is cached as `Retval::Null` | + +### Function Signature Format + +Use full signatures with input and return types: + +```json +{ + "function": "symbol()(string)", // No inputs, returns string + "function": "balanceOf(address)(uint256)", // One input, returns uint256 + "function": "decimals()(uint8)" // No inputs, returns uint8 +} +``` + +### Mocking Reverts + +```json +{ + "address": "0x1234...", + "function": "transfer(address,uint256)(bool)", + "params": ["0xaaaa...", "1000"], + "returns": [], + "reverts": true +} +``` + +### Real-World Example + +From the ERC20 test: + +```json +{ + "ethCalls": [ + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "symbol()(string)", + "params": [], + "returns": ["GRT"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "name()(string)", + "params": [], + "returns": ["TheGraph"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa000000000000000000000000000000000000"], + "returns": ["3000000000000000000"] + } + ] +} +``` + +## Assertions + +GraphQL queries to validate the indexed entity state after processing all blocks. + +### Assertion Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `query` | Yes | GraphQL query string | +| `expected` | Yes | Expected JSON response | + +### Comparison Behavior + +| Aspect | Behavior | +|--------|----------| +| Objects | Key-compared, order-insensitive | +| Arrays | **Order-insensitive** (set comparison) | +| String vs Number | Coerced — `"123"` matches `123` | +| Nulls/Booleans | Strict equality | + +**Important:** Arrays are compared as sets (order doesn't matter). If you need ordered results, use `orderBy` in your GraphQL query: + +```json +{ + "query": "{ transfers(orderBy: timestamp, orderDirection: asc) { id from to value } }", + "expected": { ... } +} +``` + +### Multiple Assertions + +You can have multiple assertions per test. They run sequentially after all blocks are processed: + +```json +{ + "assertions": [ + { + "query": "{ tokens { id name symbol } }", + "expected": { ... } + }, + { + "query": "{ accounts { id balance } }", + "expected": { ... } + } + ] +} +``` + +### Nested Entity Queries + +Test relationships and nested entities: + +```json +{ + "query": "{ accounts { id balances { token { symbol } amount } } }", + "expected": { + "accounts": [ + { + "id": "0xbbbb...", + "balances": [ + { + "token": { "symbol": "GRT" }, + "amount": "5000000000000000000" + } + ] + } + ] + } +} +``` + +## startBlock Handling + +The test runner automatically reads `startBlock` from your subgraph manifest and handles it correctly — **no real blockchain connection needed**. + +### How It Works + +1. 
Extracts the **minimum `startBlock`** across all data sources in your manifest +2. If min > 0, creates a `start_block_override` to bypass graph-node's on-chain block validation +3. Test blocks without explicit `"number"` auto-increment starting from that minimum `startBlock` + +### Default Block Numbering + +The starting block number depends on your manifest: + +| Manifest Configuration | Test Block Numbers | +|----------------------|-------------------| +| `startBlock: 0` (or unset) | 0, 1, 2, ... | +| `startBlock: 100` | 100, 101, 102, ... | +| Multiple data sources: `startBlock: 50` and `startBlock: 200` | 50, 51, 52, ... (uses minimum) | + +### Example: Single Data Source + +**Manifest:** +```yaml +dataSources: + - name: Token + source: + startBlock: 1000 +``` + +**Test:** +```json +{ + "blocks": [ + {}, // Block 1000 (auto-numbered) + {} // Block 1001 (auto-numbered) + ] +} +``` + +### Example: Explicit Block Numbers + +Override auto-numbering by specifying `"number"`: + +```json +{ + "blocks": [ + { + "number": 5000, + "events": [...] + }, + { + "number": 5001, + "events": [...] + } + ] +} +``` + +### Multi-Data Source Testing + +When your subgraph has multiple data sources with different `startBlock` values, you may need to use explicit block numbers. + +**Scenario:** DataSource A at `startBlock: 50` (Transfer events), DataSource B at `startBlock: 200` (Approval events). You want to test only DataSource B. + +**Manifest:** +```yaml +dataSources: + - name: TokenTransfers + source: + startBlock: 50 + mapping: + eventHandlers: + - event: Transfer(...) + handler: handleTransfer + - name: TokenApprovals + source: + startBlock: 200 + mapping: + eventHandlers: + - event: Approval(...) + handler: handleApproval +``` + +**Test:** +```json +{ + "name": "Test Approval handler", + "blocks": [ + { + "number": 200, // Explicit number >= DataSource B's startBlock + "events": [ + { + "address": "0x5678...", + "event": "Approval(address indexed owner, address indexed spender, uint256 value)", + "params": { + "owner": "0xaaaa...", + "spender": "0xbbbb...", + "value": "500" + } + } + ] + }, + { + "number": 201, + "events": [...] + } + ] +} +``` + +**Why explicit numbers are needed:** +- Default numbering starts at the **minimum** `startBlock` across all data sources (50 in this case) +- Blocks 50-199 are below DataSource B's `startBlock: 200`, so its handlers won't fire +- Use explicit `"number": 200` to ensure the block is in DataSource B's active range + +**Note:** DataSource A is still "active" from block 50 onward, but it simply sees no matching Transfer events in blocks 200-201, so no handlers fire for it. This is normal behavior — graph-node doesn't error on inactive handlers. 
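+
+A condensed sketch of that logic, adapted from the runner (`manifest_data_sources` stands in for the parsed manifest's data source list; imports, error handling, and the i32 bounds check elided):
+
+```rust
+// Step 1: minimum startBlock across all data sources (0 when none is set).
+let min_start_block: u64 = manifest_data_sources
+    .iter()
+    .map(|ds| ds.start_block)
+    .min()
+    .unwrap_or(0);
+
+// Steps 2-3: when startBlock > 0, a synthetic pointer to block (min - 1)
+// bypasses on-chain validation, and test blocks auto-number from the minimum.
+let start_block_override = if min_start_block > 0 {
+    let hash = keccak256((min_start_block - 1).to_be_bytes());
+    Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32))
+} else {
+    None
+};
+```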
+ +## Test Organization + +### Directory Structure + +``` +my-subgraph/ +├── subgraph.yaml +├── schema.graphql +├── src/ +│ └── mapping.ts +└── tests/ + ├── transfer.json + ├── approval.json + └── edge-cases.test.json +``` + +### Naming Conventions + +- Use `.json` or `.test.json` extension +- Descriptive names: `transfer.json`, `mint-burn.json`, `edge-cases.json` +- The test runner discovers all `*.json` and `*.test.json` files in the test directory + +## Known Limitations + +| Feature | Status | +|---------|--------| +| Log events | ✅ Supported | +| Block handlers (all filters) | ✅ Supported | +| eth_call mocking | ✅ Supported | +| Dynamic/template data sources | ✅ Supported (via graph-node) | +| Transaction receipts (`receipt: true`) | ❌ Not implemented — handlers get `null` | +| File data sources / IPFS mocking | ❌ Not implemented | +| Call triggers (traces) | ❌ Not implemented | +| `--json` CI output | ❌ Not implemented | +| Parallel test execution | ❌ Not implemented | +| Test name filtering (`--filter`) | ❌ Not implemented | + +## Tips & Best Practices + +### Use Lowercase Addresses + +Always use lowercase hex addresses with `0x` prefix: + +```json +{ + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1" // ✅ Correct +} +``` + +Not: +```json +{ + "address": "0x731A10897D267E19B34503Ad902d0A29173Ba4B1" // ❌ Mixed case +} +``` + +### Test One Thing at a Time + +Write focused tests that validate a single behavior: + +```json +// ✅ Good - tests one scenario +{ + "name": "Transfer event creates TransferEvent entity", + "blocks": [...], + "assertions": [...] +} +``` + +```json +// ❌ Avoid - tests too many things +{ + "name": "Test everything", + "blocks": [/* 50 blocks */], + "assertions": [/* 20 assertions */] +} +``` + +### Order GraphQL Results + +If your assertion needs specific ordering, use `orderBy`: + +```json +{ + "query": "{ transfers(first: 10, orderBy: timestamp, orderDirection: asc) { id } }", + "expected": { ... } +} +``` + +### Test Block Handlers with Empty Blocks + +Use empty blocks to test that block handlers fire even without events: + +```json +{ + "blocks": [ + {}, // Empty block - block handlers still fire + {} + ] +} +``` + +### Testing Polling Handlers + +When testing polling handlers, include enough blocks to verify the interval: + +```json +// ✅ Good - tests multiple intervals +{ + "name": "Polling every 3 blocks", + "blocks": [{}, {}, {}, {}, {}, {}, {}], // Blocks 0-6, handler fires at 0, 3, 6 + "assertions": [...] +} +``` + +```json +// ❌ Insufficient - only one firing +{ + "name": "Polling every 3 blocks", + "blocks": [{}], // Only block 0 - can't verify interval + "assertions": [...] +} +``` + +**Remember:** Polling handlers fire at `startBlock + (N × every)` where N = 0, 1, 2, ... 
+ +### Split Complex Tests + +Instead of one large test with many blocks, split into multiple focused test files: + +``` +tests/ +├── transfer-basic.json # Basic transfer functionality +├── transfer-zero-value.json # Edge case: zero value +└── transfer-same-account.json # Edge case: self-transfer +``` + +## Architecture + +The test runner reuses real graph-node infrastructure: + +``` +test.json + ↓ +Parse & ABI encode events + ↓ +Mock block stream (StaticStreamBuilder) + ↓ +Real graph-node indexer + ├── WASM runtime + ├── Trigger processing + └── Entity storage (pgtemp database) + ↓ +GraphQL queries → Assertions +``` + +**Key design principles:** + +- **Fresh database per test:** Each test gets an isolated pgtemp database, automatically dropped on completion +- **Real WASM runtime:** Uses `EthereumRuntimeAdapterBuilder` with real `ethereum.call` host function +- **Pre-populated call cache:** `eth_call` responses are cached before indexing starts +- **No IPFS for manifest:** Uses `FileLinkResolver` to load manifest/WASM from build directory +- **Dummy RPC adapter:** Registered at `http://0.0.0.0:0` for capability lookup; never actually called + +## Troubleshooting + +### Test Fails: "Entity not found" + +**Cause:** Handler didn't create the expected entity. + +**Fix:** +1. Check event signature matches ABI (include `indexed` keywords) +2. Verify contract address matches manifest +3. Check block number is >= data source's `startBlock` +4. Add debug logging to your mapping handler + +### Test Timeout + +**Cause:** Indexer took longer than 60 seconds (default timeout). + +**Fix:** +1. Reduce number of blocks in test +2. Simplify mapping logic +3. Check for infinite loops in handler code + +### eth_call Returns Wrong Value + +**Cause:** Call cache miss — no matching mock in `ethCalls`. + +**Fix:** +1. Verify `address`, `function`, and `params` exactly match the call from your mapping +2. Check function signature format: `"functionName(inputTypes)(returnTypes)"` +3. Ensure parameters are in correct order + +### Block Handler Not Firing + +**Cause:** Block handlers auto-fire, but might be outside data source's active range. + +**Fix:** +1. Check data source's `startBlock` in manifest +2. Use explicit `"number"` in test blocks to ensure they're >= `startBlock` +3. Verify handler is defined in manifest's `blockHandlers` section + +## Legacy Matchstick Mode + +Fall back to the external Matchstick test runner for backward compatibility: + +```bash +gnd test --matchstick +``` + +This is useful if: +- You have existing Matchstick tests +- You need features not yet supported by the mock-based runner +- You're migrating gradually from Matchstick to the new test format + +## See Also + +- [Subgraph Manifest Documentation](https://thegraph.com/docs/en/developing/creating-a-subgraph/) +- [AssemblyScript Mapping API](https://thegraph.com/docs/en/developing/assemblyscript-api/) +- [GraphQL Schema](https://thegraph.com/docs/en/developing/creating-a-subgraph/#the-graph-ql-schema) diff --git a/gnd/src/commands/test/assertion.rs b/gnd/src/commands/test/assertion.rs index a24e8c09d4a..66a330b3ae9 100644 --- a/gnd/src/commands/test/assertion.rs +++ b/gnd/src/commands/test/assertion.rs @@ -201,6 +201,8 @@ fn json_similarity(a: &serde_json::Value, b: &serde_json::Value) -> usize { if let Some(bv) = b_obj.get(k) { if json_equal(v, bv) { // `id` match is a strong signal for entity identity. + // NOTE: Magic number 100 - weight for id field vs other fields (1). + // Could be extracted to constant if tuning needed. 
score += if k == "id" { 100 } else { 1 }; } } @@ -240,6 +242,10 @@ fn json_equal(a: &serde_json::Value, b: &serde_json::Value) -> bool { // Order-insensitive comparison: each element in `a` must match // exactly one unmatched element in `b`. This handles GraphQL // collection queries where entity ordering is non-deterministic. + // + // TODO: O(n²) complexity - fine for <1000 entities but could be optimized + // with id-based HashMap lookup for objects with `id` fields. + // See: gnd-test.md "Next Iteration Improvements" let mut used = vec![false; b.len()]; a.iter().all(|a_elem| { for (i, b_elem) in b.iter().enumerate() { diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index b10e1fef73b..5a66fe43d4c 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -82,6 +82,10 @@ const NODE_ID: &str = "gnd-test"; /// background subscription listener loses its connection and logs an error. /// This is expected during cleanup and not a real problem, so we filter it /// out to avoid confusing test output. All other log messages pass through. +/// +/// NOTE: String-based filtering is fragile - if the error message changes upstream, +/// the filter breaks silently. Consider structured logging/error type matching. +/// See: gnd-test.md "Next Iteration Improvements" struct FilterStoreEventEndedDrain { inner: D, } @@ -164,6 +168,11 @@ fn extract_network_from_manifest(manifest: &Manifest) -> Result { /// since gnd test only supports testing Ethereum contracts. /// /// Returns 0 if no Ethereum data source specifies a `startBlock`. +/// +/// NOTE: When multiple datasources have different startBlocks, taking the minimum +/// is correct for default block numbering, but users must use explicit "number" +/// fields to test datasources with higher startBlocks. Consider adding a warning +/// when this is detected. See: gnd-test.md "Next Iteration Improvements" fn extract_start_block_from_manifest(manifest: &Manifest) -> Result { Ok(manifest .data_sources @@ -762,6 +771,9 @@ async fn wait_for_sync( deployment: &DeploymentLocator, stop_block: BlockPtr, ) -> Result<(), SubgraphError> { + // NOTE: Hardcoded timeout/interval - could be made configurable via env var + // or CLI flag for slow subgraphs or faster iteration during development. + // See: gnd-test.md "Next Iteration Improvements" const MAX_WAIT: Duration = Duration::from_secs(60); const WAIT_TIME: Duration = Duration::from_millis(500); diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index 37122ead868..746399a4274 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -239,6 +239,10 @@ pub struct AssertionFailure { // ============ Parsing ============ /// Parse a JSON test file from disk into a [`TestFile`]. +/// +/// NOTE: Only validates JSON schema, not semantic correctness (e.g., block ordering, +/// valid addresses, parseable event signatures). Consider adding validation pass +/// for better error messages. See: gnd-test.md "Next Iteration Improvements" pub fn parse_test_file(path: &Path) -> anyhow::Result { let content = std::fs::read_to_string(path) .map_err(|e| anyhow::anyhow!("Failed to read test file {}: {}", path.display(), e))?; diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs index 235ce7c0bb5..bdfd9a1fe91 100644 --- a/gnd/src/commands/test/trigger.rs +++ b/gnd/src/commands/test/trigger.rs @@ -65,7 +65,8 @@ pub fn build_blocks_with_triggers( .context("Invalid block hash")? 
.unwrap_or_else(|| keccak256(number.to_be_bytes())); - // Default timestamp simulates 12-second block times. + // Default timestamp simulates 12-second block times (Ethereum mainnet average). + // NOTE: Magic number - could be extracted to a named constant. let timestamp = test_block.timestamp.unwrap_or(number * 12); // Parse base fee per gas if provided (EIP-1559 support). From 6827e5a60cac4442574dba89b78aa06cc594978a Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Feb 2026 16:57:08 +0200 Subject: [PATCH 15/34] gnd: Fix missing fields, update readme Signed-off-by: Maksim Dimitrov --- gnd/src/commands/test/README.md | 26 +------------------------- gnd/src/validation/mod.rs | 4 ++++ 2 files changed, 5 insertions(+), 25 deletions(-) diff --git a/gnd/src/commands/test/README.md b/gnd/src/commands/test/README.md index 57f74969d37..af6fe8fd1f0 100644 --- a/gnd/src/commands/test/README.md +++ b/gnd/src/commands/test/README.md @@ -582,7 +582,7 @@ my-subgraph/ | Log events | ✅ Supported | | Block handlers (all filters) | ✅ Supported | | eth_call mocking | ✅ Supported | -| Dynamic/template data sources | ✅ Supported (via graph-node) | +| Dynamic/template data sources | (Untested) | Transaction receipts (`receipt: true`) | ❌ Not implemented — handlers get `null` | | File data sources / IPFS mocking | ❌ Not implemented | | Call triggers (traces) | ❌ Not implemented | @@ -655,30 +655,6 @@ Use empty blocks to test that block handlers fire even without events: } ``` -### Testing Polling Handlers - -When testing polling handlers, include enough blocks to verify the interval: - -```json -// ✅ Good - tests multiple intervals -{ - "name": "Polling every 3 blocks", - "blocks": [{}, {}, {}, {}, {}, {}, {}], // Blocks 0-6, handler fires at 0, 3, 6 - "assertions": [...] -} -``` - -```json -// ❌ Insufficient - only one firing -{ - "name": "Polling every 3 blocks", - "blocks": [{}], // Only block 0 - can't verify interval - "assertions": [...] -} -``` - -**Remember:** Polling handlers fire at `startBlock + (N × every)` where N = 0, 1, 2, ... 
-
 ### Split Complex Tests
 
 Instead of one large test with many blocks, split into multiple focused test files:
diff --git a/gnd/src/validation/mod.rs b/gnd/src/validation/mod.rs
index 11832b0eb23..b64c9f41398 100644
--- a/gnd/src/validation/mod.rs
+++ b/gnd/src/validation/mod.rs
@@ -1181,6 +1181,8 @@ type Post @entity {
             abis: vec![],
             source_address: None,
             source_abi: None,
+            start_block: 0,
+            end_block: None,
             event_handlers: vec![],
             call_handlers: vec![],
             block_handlers: vec![],
@@ -1197,6 +1199,8 @@ type Post @entity {
             abis: vec![],
             source_address: Some(address.to_string()),
             source_abi: None,
+            start_block: 0,
+            end_block: None,
             event_handlers: vec![],
             call_handlers: vec![],
             block_handlers: vec![],

From 05cce6b09a98c0be92b530c9689f6b3f98191230 Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Sat, 14 Feb 2026 00:01:11 +0200
Subject: [PATCH 16/34] gnd(test): Refactor CLI to accept test files/dirs as
 positional args
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Replace positional `manifest` arg with `--manifest` / `-m` flag
  (default: subgraph.yaml)
- Add positional `tests` args accepting file or directory
- When no args given, default to scanning `tests/`
- Bare filenames resolve to `tests/` for convenience
  (e.g., `gnd test foo.json` → `tests/foo.json`)
- Remove `--test-dir` flag (replaced by positional args)
- Update README with new usage examples

Signed-off-by: Maksim Dimitrov
---
 gnd/README.md                       | 40 +++++++++++++----
 gnd/src/commands/test/matchstick.rs | 14 ++++--
 gnd/src/commands/test/mod.rs        | 67 +++++++++++++++++++++++------
 3 files changed, 94 insertions(+), 27 deletions(-)

diff --git a/gnd/README.md b/gnd/README.md
index 72355c033f8..61b4f15f117 100644
--- a/gnd/README.md
+++ b/gnd/README.md
@@ -301,25 +301,47 @@ Keys are stored in `~/.graph-cli.json`.

 ### `gnd test`

-Run Matchstick tests for the subgraph.
+Run subgraph tests.

 ```bash
-gnd test [DATASOURCE]
+gnd test [TEST_FILES...]
 ```

 **Arguments:**

-- `DATASOURCE`: Specific data source to test (optional)
+- `TEST_FILES`: Test JSON files or directories to scan. Defaults to `tests/` when nothing is specified.
**Flags:** | Flag | Short | Description | |------|-------|-------------| -| `--coverage` | `-c` | Run with coverage reporting | -| `--docker` | `-d` | Run in Docker container | -| `--force` | `-f` | Force redownload of Matchstick binary | -| `--logs` | `-l` | Show debug logs | -| `--recompile` | `-r` | Force recompilation before testing | -| `--version` | `-v` | Matchstick version to use | +| `--manifest` | `-m` | Path to subgraph manifest (default: `subgraph.yaml`) | +| `--skip-build` | | Skip building the subgraph before testing | +| `--postgres-url` | | PostgreSQL connection URL (env: `POSTGRES_URL`) | +| `--matchstick` | | Use legacy Matchstick runner | +| `--docker` | `-d` | Run Matchstick in Docker (requires `--matchstick`) | +| `--coverage` | `-c` | Run with coverage reporting (requires `--matchstick`) | +| `--recompile` | `-r` | Force recompilation (requires `--matchstick`) | +| `--force` | `-f` | Force redownload of Matchstick binary (requires `--matchstick`) | + +**Examples:** + +```bash +# Run all tests in tests/ directory (default) +gnd test + +# Run specific test files +gnd test transfer.json approval.json +gnd test tests/transfer.json + +# Scan a custom directory +gnd test my-tests/ + +# Use a different manifest +gnd test -m subgraph.staging.yaml tests/transfer.json + +# Skip automatic build +gnd test --skip-build +``` ### `gnd clean` diff --git a/gnd/src/commands/test/matchstick.rs b/gnd/src/commands/test/matchstick.rs index c8fec7902bd..ccb18d4e97a 100644 --- a/gnd/src/commands/test/matchstick.rs +++ b/gnd/src/commands/test/matchstick.rs @@ -279,8 +279,11 @@ async fn download_matchstick_binary(version: &str, platform: &str, force: bool) async fn run_binary_tests(opt: &TestOpt) -> Result<()> { step(Step::Generate, "Running Matchstick tests (legacy mode)"); - let version = - resolve_matchstick_version(opt.matchstick_version.as_deref(), &opt.test_dir).await?; + let version = resolve_matchstick_version( + opt.matchstick_version.as_deref(), + Path::new(super::DEFAULT_TEST_DIR), + ) + .await?; let platform = get_platform(&version)?; let bin_path = download_matchstick_binary(&version, &platform, opt.force).await?; @@ -364,8 +367,11 @@ async fn run_docker_tests(opt: &TestOpt) -> Result<()> { let image_exists = !image_check.stdout.is_empty(); if !image_exists || opt.force { - let version = - resolve_matchstick_version(opt.matchstick_version.as_deref(), &opt.test_dir).await?; + let version = resolve_matchstick_version( + opt.matchstick_version.as_deref(), + Path::new(super::DEFAULT_TEST_DIR), + ) + .await?; step(Step::Generate, "Building Matchstick Docker image"); let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 06fbaf62a2f..8cca54cf13b 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -57,17 +57,20 @@ use std::path::PathBuf; use crate::output::{step, Step}; +/// Default directory for test file discovery. +const DEFAULT_TEST_DIR: &str = "tests"; + #[derive(Clone, Debug, Parser)] #[clap(about = "Run subgraph tests")] pub struct TestOpt { + /// Test files or directories to run. Directories are scanned for *.json / *.test.json. + /// Defaults to the "tests/" directory when nothing is specified. 
+ pub tests: Vec, + /// Path to subgraph manifest - #[clap(default_value = "subgraph.yaml")] + #[clap(short = 'm', long, default_value = "subgraph.yaml")] pub manifest: PathBuf, - /// Test files directory - #[clap(short = 't', long, default_value = "tests")] - pub test_dir: PathBuf, - /// Skip building the subgraph before testing #[clap(long)] pub skip_build: bool, @@ -134,19 +137,24 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { step(Step::Done, "Build complete"); } - // Find all test JSON files in the test directory (sorted for deterministic order). + // Resolve test files from positional args. Default to "tests/" when none given. + let tests = if opt.tests.is_empty() { + vec![PathBuf::from(DEFAULT_TEST_DIR)] + } else { + opt.tests.clone() + }; + step(Step::Load, "Discovering test files"); - let test_files = schema::discover_test_files(&opt.test_dir)?; + let test_files = resolve_test_paths(&tests)?; if test_files.is_empty() { step(Step::Warn, "No test files found"); - println!( - " Looking in: {}", - opt.test_dir - .canonicalize() - .unwrap_or(opt.test_dir.clone()) - .display() - ); + for test in &tests { + println!( + " Looking in: {}", + test.canonicalize().unwrap_or(test.clone()).display() + ); + } println!(" Expected: *.test.json or *.json files"); return Ok(()); } @@ -196,3 +204,34 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { Ok(()) } } + +/// Resolve a list of paths into concrete test file paths. +/// +/// Each path is either a JSON file (used directly) or a directory +/// (scanned for `*.json` / `*.test.json`). Bare filenames that don't +/// exist at the given path are also looked up in the default test +/// directory (e.g. `gnd test foo.json` resolves to `tests/foo.json`). +/// Results are sorted for deterministic execution order. +fn resolve_test_paths(paths: &[PathBuf]) -> Result> { + let mut files = Vec::new(); + + for path in paths { + if path.is_dir() { + files.extend(schema::discover_test_files(path)?); + } else if path.exists() { + files.push(path.clone()); + } else { + // Try resolving bare filename inside the default test directory. + let in_default_dir = PathBuf::from(DEFAULT_TEST_DIR).join(path); + if in_default_dir.exists() { + files.push(in_default_dir); + } else { + // Keep the original path — parse_test_file will report the error. 
+                files.push(path.clone());
+            }
+        }
+    }
+
+    files.sort();
+    Ok(files)
+}

From a3333e672241a875329cab1299d0ee3421684cde Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Mon, 16 Feb 2026 22:50:22 +0200
Subject: [PATCH 17/34] =?UTF-8?q?gnd(test):=20Fix=20diff=20colors=20?=
 =?UTF-8?q?=E2=80=94=20red=20for=20expected,=20green=20for=20actual?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 gnd/src/commands/test/output.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/gnd/src/commands/test/output.rs b/gnd/src/commands/test/output.rs
index 82b8f19b8cc..962a980500e 100644
--- a/gnd/src/commands/test/output.rs
+++ b/gnd/src/commands/test/output.rs
@@ -99,16 +99,16 @@ pub fn print_failure_details(details: &[FailureDetail]) {
             println!(
                 " {} {} expected {} actual",
                 style("Diff:").yellow(),
-                style("(-)").green(),
-                style("(+)").red(),
+                style("(-)").red(),
+                style("(+)").green(),
             );

             let diff = TextDiff::from_lines(&expected, &actual);
             for change in diff.iter_all_changes() {
                 let text = change.value().trim_end_matches('\n');
                 match change.tag() {
-                    ChangeTag::Delete => println!(" {}", style(format!("- {text}")).green()),
-                    ChangeTag::Insert => println!(" {}", style(format!("+ {text}")).red()),
+                    ChangeTag::Delete => println!(" {}", style(format!("- {text}")).red()),
+                    ChangeTag::Insert => println!(" {}", style(format!("+ {text}")).green()),
                     ChangeTag::Equal => println!(" {text}"),
                 }
             }

From ca4dae89c804a12a041c73ee7ddeb44588d1f454 Mon Sep 17 00:00:00 2001
From: Maksim Dimitrov
Date: Mon, 16 Feb 2026 22:50:37 +0200
Subject: [PATCH 18/34] gnd(test): Support Array, FixedArray, and Tuple in
 Solidity type conversion

- Add Array, FixedArray, Tuple branches to json_to_sol_value
- Fix strip_prefix("0x") usage (trim_start_matches strips the "0x" pattern
  repeatedly rather than just once)
- Fix i64::MIN two's complement handling via I256::into_raw()
- Add block number overflow check against i32::MAX

---
 gnd/src/commands/test/trigger.rs | 103 ++++++++++++++++++++++++-------
 1 file changed, 79 insertions(+), 24 deletions(-)

diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs
index bdfd9a1fe91..e7be66f2c08 100644
--- a/gnd/src/commands/test/trigger.rs
+++ b/gnd/src/commands/test/trigger.rs
@@ -24,7 +24,7 @@
 //! - Auto-injected `Start` and `End` block triggers (so block handlers fire correctly)

 use super::schema::{LogEvent, TestFile};
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, ensure, Context, Result};
 use graph::blockchain::block_stream::BlockWithTriggers;
 use graph::prelude::alloy::dyn_abi::{DynSolType, DynSolValue};
 use graph::prelude::alloy::json_abi::Event;
@@ -88,6 +88,11 @@ pub fn build_blocks_with_triggers(
         // with any filter fire correctly:
         // - Start: matches `once` handlers (at start_block) and initialization handlers
         // - End: matches unfiltered and `polling` handlers
+        ensure!(
+            number <= i32::MAX as u64,
+            "block number {} exceeds i32::MAX",
+            number
+        );
         let block_ptr = BlockPtr::new(hash.into(), number as i32);
         triggers.push(EthereumTrigger::Block(
             block_ptr.clone(),
@@ -256,11 +261,13 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re
         DynSolType::Uint(bits) => {
             let n = match value {
                 // String values support both decimal and "0x"-prefixed hex.
- serde_json::Value::String(s) => U256::from_str_radix( - s.trim_start_matches("0x"), - if s.starts_with("0x") { 16 } else { 10 }, - ) - .context("Invalid uint")?, + serde_json::Value::String(s) => { + let (digits, radix) = match s.strip_prefix("0x") { + Some(hex) => (hex, 16), + None => (s.as_str(), 10), + }; + U256::from_str_radix(digits, radix).context("Invalid uint")? + } // JSON numbers are limited to u64 range — use strings for larger values. serde_json::Value::Number(n) => U256::from(n.as_u64().ok_or_else(|| { anyhow!("uint value {} does not fit in u64, use a string instead", n) @@ -270,18 +277,18 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re Ok(DynSolValue::Uint(n, *bits)) } DynSolType::Int(bits) => { - // Signed integers use two's complement representation in U256. - // Negative values: negate via !abs + 1 (two's complement). let n = match value { serde_json::Value::String(s) => { - let is_negative = s.starts_with('-'); - let s_clean = s.trim_start_matches('-'); - let abs = U256::from_str_radix( - s_clean.trim_start_matches("0x"), - if s_clean.starts_with("0x") { 16 } else { 10 }, - ) - .context("Invalid int")?; - if is_negative { + let (is_neg, s_abs) = match s.strip_prefix('-') { + Some(rest) => (true, rest), + None => (false, s.as_str()), + }; + let (digits, radix) = match s_abs.strip_prefix("0x") { + Some(hex) => (hex, 16), + None => (s_abs, 10), + }; + let abs = U256::from_str_radix(digits, radix).context("Invalid int")?; + if is_neg { !abs + U256::from(1) // Two's complement negation } else { abs @@ -289,13 +296,16 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re } serde_json::Value::Number(n) => { if let Some(i) = n.as_i64() { - if i < 0 { - !U256::from((-i) as u64) + U256::from(1) - } else { - U256::from(i as u64) - } + // into_raw() gives the two's complement U256 representation. + // Handles i64::MIN correctly (unlike `-i as u64` which overflows). + I256::try_from(i).unwrap().into_raw() } else { - U256::from(n.as_u64().unwrap_or(0)) + U256::from(n.as_u64().ok_or_else(|| { + anyhow!( + "int value {} not representable as u64, use a string instead", + n + ) + })?) 
} } _ => return Err(anyhow!("Expected string or number for int")), @@ -310,7 +320,8 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re let s = value .as_str() .ok_or_else(|| anyhow!("Expected string for bytes"))?; - let bytes = hex::decode(s.trim_start_matches("0x")).context("Invalid hex")?; + let hex_str = s.strip_prefix("0x").unwrap_or(s); + let bytes = hex::decode(hex_str).context("Invalid hex")?; Ok(DynSolValue::Bytes(bytes)) } DynSolType::String => { @@ -321,7 +332,8 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re let s = value .as_str() .ok_or_else(|| anyhow!("Expected string for bytes{}", len))?; - let bytes = hex::decode(s.trim_start_matches("0x")).context("Invalid hex")?; + let hex_str = s.strip_prefix("0x").unwrap_or(s); + let bytes = hex::decode(hex_str).context("Invalid hex")?; if bytes.len() > *len { return Err(anyhow!( "bytes{}: got {} bytes, expected at most {}", @@ -336,6 +348,49 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re padded[..bytes.len()].copy_from_slice(&bytes); Ok(DynSolValue::FixedBytes(B256::from(padded), *len)) } + DynSolType::Array(inner) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for array type"))?; + let elements: Vec = arr + .iter() + .map(|elem| json_to_sol_value(inner, elem)) + .collect::>()?; + Ok(DynSolValue::Array(elements)) + } + DynSolType::FixedArray(inner, size) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for fixed array type"))?; + ensure!( + arr.len() == *size, + "Fixed array expects {} elements, got {}", + size, + arr.len() + ); + let elements: Vec = arr + .iter() + .map(|elem| json_to_sol_value(inner, elem)) + .collect::>()?; + Ok(DynSolValue::FixedArray(elements)) + } + DynSolType::Tuple(types) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for tuple type (positional)"))?; + ensure!( + arr.len() == types.len(), + "Tuple expects {} elements, got {}", + types.len(), + arr.len() + ); + let values: Vec = types + .iter() + .zip(arr.iter()) + .map(|(ty, val)| json_to_sol_value(ty, val)) + .collect::>()?; + Ok(DynSolValue::Tuple(values)) + } _ => Err(anyhow!("Unsupported type: {:?}", sol_type)), } } From 5946cc7b8ede5f401eee4a96c13e5890f25b2003 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Mon, 16 Feb 2026 22:50:51 +0200 Subject: [PATCH 19/34] gnd(test): Recurse into subdirectories when discovering test files - discover_test_files now walks subdirectories recursively - Skip entries starting with non-alphanumeric chars (.hidden, _fixture) --- gnd/src/commands/test/schema.rs | 42 ++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index 746399a4274..a321f01ac79 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -76,7 +76,7 @@ pub struct TestBlock { pub timestamp: Option, /// Base fee per gas (EIP-1559). If omitted, defaults to None (pre-EIP-1559 blocks). - /// Specified as a decimal string to handle large values (e.g., "15000000000"). + /// Specified as a decimal string, parsed as u64 (e.g., "15000000000"). #[serde(default, rename = "baseFeePerGas")] pub base_fee_per_gas: Option, @@ -159,7 +159,6 @@ pub struct MockEthCall { /// A GraphQL assertion to validate indexed entity state. 
#[derive(Debug, Clone, Deserialize)] -#[allow(dead_code)] pub struct Assertion { /// GraphQL query string. Example: `"{ transfer(id: \"1\") { from to value } }"` pub query: String, @@ -250,9 +249,11 @@ pub fn parse_test_file(path: &Path) -> anyhow::Result { .map_err(|e| anyhow::anyhow!("Failed to parse test file {}: {}", path.display(), e)) } -/// Discover test files in a directory. +/// Discover test files in a directory (recursive). /// -/// Matches `*.json` and `*.test.json` files (non-recursive). +/// Matches `*.json` and `*.test.json` files. Recurses into subdirectories. +/// Entries whose name starts with a non-alphanumeric character (e.g., `.hidden`, +/// `_fixture`) are skipped for both files and directories. /// Returns paths sorted alphabetically for deterministic execution order. pub fn discover_test_files(dir: &Path) -> anyhow::Result> { let mut files = Vec::new(); @@ -261,21 +262,34 @@ pub fn discover_test_files(dir: &Path) -> anyhow::Result> { return Ok(files); } + discover_recursive(dir, &mut files)?; + files.sort(); + Ok(files) +} + +/// Recursively walk `dir`, collecting JSON test files and descending into subdirectories. +/// +/// Skips entries whose name starts with a non-alphanumeric character (e.g., `.hidden`, `_fixture`). +fn discover_recursive(dir: &Path, files: &mut Vec) -> anyhow::Result<()> { for entry in std::fs::read_dir(dir)? { let entry = entry?; let path = entry.path(); + let name = match path.file_name().and_then(|n| n.to_str()) { + Some(n) => n, + None => continue, + }; + + // Skip entries whose name starts with a non-alphanumeric character. + if !name.starts_with(|c: char| c.is_alphanumeric()) { + continue; + } - if path.is_file() { - if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - if (name.ends_with(".test.json") || name.ends_with(".json")) - && !name.starts_with('.') - { - files.push(path); - } - } + if path.is_dir() { + discover_recursive(&path, files)?; + } else if path.is_file() && (name.ends_with(".test.json") || name.ends_with(".json")) { + files.push(path); } } - files.sort(); - Ok(files) + Ok(()) } From 454519b9537a4c0536300352543cb3b54007fa9e Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Mon, 16 Feb 2026 22:51:10 +0200 Subject: [PATCH 20/34] gnd(test): Validate eth_call parameter and return value counts Add upfront arity checks in encode_function_call and encode_return_value so mismatches produce clear errors instead of silently truncating. Also simplify redundant .with_context() wrappers in populate_single_call. 
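
For context, the truncation came from `zip`, which stops at the shorter of the
two iterators, so a param-count mismatch was silently dropped instead of being
rejected (illustrative snippet, not code from this repo):

    let inputs = ["to", "amount"]; // ABI declares two inputs
    let params = ["0xaaaa..."];    // test file supplied only one value
    // zip() yields pairs only up to the shorter length, so the mismatch
    // went unnoticed before the explicit arity check:
    assert_eq!(params.iter().zip(inputs.iter()).count(), 1);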
--- gnd/src/commands/test/eth_calls.rs | 47 ++++++++++++++---------------- 1 file changed, 22 insertions(+), 25 deletions(-) diff --git a/gnd/src/commands/test/eth_calls.rs b/gnd/src/commands/test/eth_calls.rs index 97ca3ad0a5d..5dd9651a272 100644 --- a/gnd/src/commands/test/eth_calls.rs +++ b/gnd/src/commands/test/eth_calls.rs @@ -65,6 +65,15 @@ fn encode_function_call(function_sig: &str, params: &[serde_json::Value]) -> Res ) })?; + if params.len() != function.inputs.len() { + return Err(anyhow!( + "Parameter count mismatch for '{}': expected {} parameters, got {}", + function_sig, + function.inputs.len(), + params.len() + )); + } + let args: Vec<_> = params .iter() .zip(&function.inputs) @@ -102,6 +111,15 @@ fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Res ) })?; + if returns.len() != function.outputs.len() { + return Err(anyhow!( + "Return value count mismatch for '{}': expected {} return values, got {}", + function_sig, + function.outputs.len(), + returns.len() + )); + } + let output_values: Vec<_> = returns .iter() .zip(&function.outputs) @@ -179,43 +197,22 @@ async fn populate_single_call( block_ptr: &BlockPtr, eth_call: &MockEthCall, ) -> Result<()> { - let address: Address = eth_call - .address - .parse() - .with_context(|| format!("Invalid contract address: {}", eth_call.address))?; + let address: Address = eth_call.address.parse()?; - let encoded_call = - encode_function_call(ð_call.function, ð_call.params).with_context(|| { - format!( - "Failed to encode call for {}::{}", - eth_call.address, eth_call.function - ) - })?; + let encoded_call = encode_function_call(ð_call.function, ð_call.params)?; let request = call::Request::new(address, encoded_call, 0); let retval = if eth_call.reverts { call::Retval::Null } else { - let encoded_return = encode_return_value(ð_call.function, ð_call.returns) - .with_context(|| { - format!( - "Failed to encode return value for {}::{}", - eth_call.address, eth_call.function - ) - })?; + let encoded_return = encode_return_value(ð_call.function, ð_call.returns)?; call::Retval::Value(encoded_return.into()) }; chain_store .set_call(logger, request, block_ptr.clone(), retval) - .await - .with_context(|| { - format!( - "Failed to cache eth_call for {}::{}", - eth_call.address, eth_call.function - ) - })?; + .await?; Ok(()) } From aea3be640146e8a6e6afb81f186b842c5f04f060 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Mon, 16 Feb 2026 22:51:29 +0200 Subject: [PATCH 21/34] gnd(test): Ensure subgraph cleanup and remove FilterStoreEventEndedDrain - Remove FilterStoreEventEndedDrain log filter and unused logger field - Always call stop_subgraph after test, even on error - Warn when a test has blocks but no assertions - Add block number overflow check against i32::MAX --- gnd/src/commands/test/runner.rs | 81 ++++++++++++--------------------- 1 file changed, 30 insertions(+), 51 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 5a66fe43d4c..92ccd99fd1f 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -31,7 +31,7 @@ use super::schema::{TestFile, TestResult}; use super::trigger::build_blocks_with_triggers; use super::TestOpt; use crate::manifest::{load_manifest, Manifest}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, ensure, Context, Result}; use graph::amp::FlightClient; use graph::blockchain::block_stream::BlockWithTriggers; use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier}; @@ -51,7 +51,7 @@ use 
graph::prelude::{ DeploymentHash, LoggerFactory, NodeId, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }; -use graph::slog::{info, o, Drain, Logger, OwnedKVList, Record}; +use graph::slog::{info, o, Drain, Logger}; use graph_chain_ethereum::chain::EthereumRuntimeAdapterBuilder; use graph_chain_ethereum::network::{EthereumNetworkAdapter, EthereumNetworkAdapters}; use graph_chain_ethereum::{ @@ -74,38 +74,6 @@ use pgtemp::PgTempDBBuilder; /// Node ID used for all test deployments. Visible in store metadata. const NODE_ID: &str = "gnd-test"; -// ============ Test Infrastructure Types ============ - -/// A slog drain that suppresses the "Store event stream ended" error message. -/// -/// When a test completes and the pgtemp database is dropped, the store's -/// background subscription listener loses its connection and logs an error. -/// This is expected during cleanup and not a real problem, so we filter it -/// out to avoid confusing test output. All other log messages pass through. -/// -/// NOTE: String-based filtering is fragile - if the error message changes upstream, -/// the filter breaks silently. Consider structured logging/error type matching. -/// See: gnd-test.md "Next Iteration Improvements" -struct FilterStoreEventEndedDrain { - inner: D, -} - -impl Drain for FilterStoreEventEndedDrain { - type Ok = Option; - type Err = D::Err; - - fn log(&self, record: &Record, values: &OwnedKVList) -> Result { - if record - .msg() - .to_string() - .contains("Store event stream ended") - { - return Ok(None); - } - self.inner.log(record, values).map(Some) - } -} - /// Bundles the store infrastructure needed for test execution. /// /// Created once per test and holds the connection pools, chain store, @@ -128,8 +96,6 @@ struct TestStores { /// the store (for querying sync status), the deployment locator, /// and the GraphQL runner (for assertions). pub(super) struct TestContext { - #[allow(dead_code)] - pub(super) logger: Logger, /// Starts/stops subgraph indexing. pub(super) provider: Arc, /// Used to check sync progress and health status. @@ -196,6 +162,15 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result Result Result 0 { use graph::prelude::alloy::primitives::keccak256; let hash = keccak256((min_start_block - 1).to_be_bytes()); + ensure!( + min_start_block - 1 <= i32::MAX as u64, + "block number {} exceeds i32::MAX", + min_start_block - 1 + ); Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32)) } else { None @@ -301,8 +276,7 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result Result Result Date: Mon, 16 Feb 2026 22:51:49 +0200 Subject: [PATCH 22/34] gnd(test): Validate datasource name to prevent shell injection in Docker mode Add alphanumeric/hyphen/underscore validation before interpolating the datasource name into Docker's sh -c command. Also simplify redundant .with_context() wrappers. 
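
The check is deliberately strict; an equivalent standalone predicate (the
malicious name below is hypothetical):

    fn is_safe_datasource_name(name: &str) -> bool {
        name.chars()
            .all(|c| c.is_alphanumeric() || c == '_' || c == '-')
    }

    assert!(is_safe_datasource_name("my-data_source1"));
    // Would otherwise be expanded inside `sh -c "matchstick $ARGS"`:
    assert!(!is_safe_datasource_name("x; rm -rf /"));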
--- gnd/src/commands/test/matchstick.rs | 39 +++++++++++++---------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/gnd/src/commands/test/matchstick.rs b/gnd/src/commands/test/matchstick.rs index ccb18d4e97a..ff5a27a6d1e 100644 --- a/gnd/src/commands/test/matchstick.rs +++ b/gnd/src/commands/test/matchstick.rs @@ -140,8 +140,7 @@ fn write_version_cache(path: &Path, version: &str) -> Result<()> { /// - For versions > 0.5.4: simplified platform names (macos-12, linux-22) /// - For versions <= 0.5.4: legacy platform names with more OS-version granularity fn get_platform(version: &str) -> Result { - let ver = semver::Version::parse(version) - .with_context(|| format!("Invalid Matchstick version: {version}"))?; + let ver = semver::Version::parse(version)?; let cutoff = semver::Version::new(0, 5, 4); let os = std::env::consts::OS; @@ -229,40 +228,26 @@ async fn download_matchstick_binary(version: &str, platform: &str, force: bool) return Ok(bin_path); } - std::fs::create_dir_all(&bin_dir) - .with_context(|| format!("Failed to create directory: {}", bin_dir.display()))?; + std::fs::create_dir_all(&bin_dir)?; let url = format!("{MATCHSTICK_DOWNLOAD_BASE}/{version}/{platform}"); step(Step::Load, &format!("Downloading Matchstick {version}")); let client = reqwest::Client::builder().user_agent("gnd-cli").build()?; - let resp = client - .get(&url) - .send() - .await - .with_context(|| format!("Failed to download from {url}"))? - .error_for_status() - .with_context(|| { - format!( - "Download failed for {url}.\n\ - Try Docker mode instead: gnd test --matchstick -d" - ) - })?; + let resp = client.get(&url).send().await?.error_for_status()?; let bytes = resp .bytes() .await .context("Failed to read download response")?; - std::fs::write(&bin_path, &bytes) - .with_context(|| format!("Failed to write binary to {}", bin_path.display()))?; + std::fs::write(&bin_path, &bytes)?; #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; - std::fs::set_permissions(&bin_path, std::fs::Permissions::from_mode(0o755)) - .with_context(|| format!("Failed to set permissions on {}", bin_path.display()))?; + std::fs::set_permissions(&bin_path, std::fs::Permissions::from_mode(0o755))?; } step(Step::Done, &format!("Downloaded to {}", bin_path.display())); @@ -341,6 +326,17 @@ async fn run_docker_tests(opt: &TestOpt) -> Result<()> { test_args.push_str(" -r"); } if let Some(datasource) = &opt.datasource { + // Validate datasource name to prevent shell injection via Docker's + // `sh -c "matchstick $ARGS"` expansion. + if !datasource + .chars() + .all(|c| c.is_alphanumeric() || c == '_' || c == '-') + { + anyhow::bail!( + "Invalid datasource name '{}': must contain only alphanumeric characters, hyphens, or underscores", + datasource + ); + } test_args.push_str(&format!(" {}", datasource)); } @@ -442,8 +438,7 @@ CMD ["sh", "-c", "matchstick $ARGS"] version = version ); - fs::write(path, dockerfile_content) - .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; + fs::write(path, dockerfile_content)?; step(Step::Write, &format!("Created {}", path.display())); Ok(()) } From 5190bb092a380be4cf5005e030b2522862785cd3 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Mon, 16 Feb 2026 22:52:03 +0200 Subject: [PATCH 23/34] gnd(test): Replace unwrap/unimplemented with proper error handling - block_stream: .unwrap() -> .expect() on mutex lock - noop: unimplemented!() -> Err(anyhow!(...)) - mod: Fail early with bail! 
on missing test file --- gnd/src/commands/test/block_stream.rs | 12 ++++++++++-- gnd/src/commands/test/mod.rs | 3 +-- gnd/src/commands/test/noop.rs | 4 +++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/gnd/src/commands/test/block_stream.rs b/gnd/src/commands/test/block_stream.rs index 68684d36291..e2cedbf821b 100644 --- a/gnd/src/commands/test/block_stream.rs +++ b/gnd/src/commands/test/block_stream.rs @@ -142,7 +142,11 @@ impl BlockStreamBuilder for MutexBlockStreamBuilder { filter: Arc<::TriggerFilter>, unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, ) -> anyhow::Result>> { - let builder = self.0.lock().unwrap().clone(); + let builder = self + .0 + .lock() + .expect("block stream builder lock poisoned") + .clone(); builder .build_firehose( chain, @@ -166,7 +170,11 @@ impl BlockStreamBuilder for MutexBlockStreamBuilder { filter: Arc>, unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, ) -> anyhow::Result>> { - let builder = self.0.lock().unwrap().clone(); + let builder = self + .0 + .lock() + .expect("block stream builder lock poisoned") + .clone(); builder .build_polling( chain, diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 8cca54cf13b..4f6f9db7075 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -226,8 +226,7 @@ fn resolve_test_paths(paths: &[PathBuf]) -> Result> { if in_default_dir.exists() { files.push(in_default_dir); } else { - // Keep the original path — parse_test_file will report the error. - files.push(path.clone()); + anyhow::bail!("Test file not found: {}", path.display()); } } } diff --git a/gnd/src/commands/test/noop.rs b/gnd/src/commands/test/noop.rs index 70faa0525b5..ce724125ca4 100644 --- a/gnd/src/commands/test/noop.rs +++ b/gnd/src/commands/test/noop.rs @@ -41,7 +41,9 @@ impl BlockRefetcher for StaticBlockRefetcher { _logger: &Logger, _cursor: FirehoseCursor, ) -> Result { - unimplemented!("StaticBlockRefetcher should never be called") + Err(anyhow::anyhow!( + "StaticBlockRefetcher::get_block should never be called — block refetching is disabled in test mode" + )) } } From 678b963dba79d6cd3a8026afc2caa34f0120b4aa Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Mon, 16 Feb 2026 23:11:45 +0200 Subject: [PATCH 24/34] gnd(test): Fix ServerAddress initialization Signed-off-by: Maksim Dimitrov --- gnd/src/commands/test/runner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 92ccd99fd1f..e3bc8f8c386 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -606,7 +606,7 @@ async fn setup_context( // for manifest loading (FileLinkResolver handles that). let ipfs_metrics = IpfsMetrics::new(&mock_registry); let ipfs_client = Arc::new( - IpfsRpcClient::new_unchecked(ServerAddress::local_rpc_api(), ipfs_metrics, logger) + IpfsRpcClient::new_unchecked(ServerAddress::test_rpc_api(), ipfs_metrics, logger) .context("Failed to create IPFS client")?, ); From 32f36ff0fce81eaeb4b8eb93987f3605e690e77a Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 10:47:52 +0200 Subject: [PATCH 25/34] gnd(test): Add verbose logging flag and extract manifest loading Add `-v` / `--verbose` flag with count semantics for controlling graph-node log verbosity during tests (-v=info, -vv=debug, -vvv=trace). GRAPH_LOG env var always takes precedence when set. 
Extract manifest loading into `ManifestInfo` struct loaded once per run, avoiding redundant parsing across tests. Thread a single logger through setup_stores/setup_chain instead of creating ad-hoc loggers. --- gnd/src/commands/test/mod.rs | 10 +- gnd/src/commands/test/runner.rs | 218 +++++++++++++++++++------------- 2 files changed, 142 insertions(+), 86 deletions(-) diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 4f6f9db7075..0ccb92acd04 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -106,6 +106,11 @@ pub struct TestOpt { /// Specific data source to test (Matchstick only) #[clap(long, requires = "matchstick")] pub datasource: Option, + + /// Increase graph-node log verbosity (-v info, -vv debug, -vvv trace). + /// Overridden by GRAPH_LOG env var when set. + #[clap(short = 'v', long, action = clap::ArgAction::Count)] + pub verbose: u8, } /// Entry point for the `gnd test` command. @@ -144,6 +149,9 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { opt.tests.clone() }; + step(Step::Load, "Loading manifest"); + let manifest_info = runner::load_manifest_info(&opt)?; + step(Step::Load, "Discovering test files"); let test_files = resolve_test_paths(&tests)?; @@ -178,7 +186,7 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { // Run the test: set up infra, index blocks, check assertions. // Each test gets a fresh database so tests are fully isolated. - match runner::run_single_test(&opt, &test_file).await { + match runner::run_single_test(&opt, &manifest_info, &test_file).await { Ok(result) => { output::print_test_result(&test_file.name, &result); if result.is_passed() { diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index e3bc8f8c386..62cb82176b0 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -51,7 +51,7 @@ use graph::prelude::{ DeploymentHash, LoggerFactory, NodeId, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }; -use graph::slog::{info, o, Drain, Logger}; +use graph::slog::{info, o, Logger}; use graph_chain_ethereum::chain::EthereumRuntimeAdapterBuilder; use graph_chain_ethereum::network::{EthereumNetworkAdapter, EthereumNetworkAdapters}; use graph_chain_ethereum::{ @@ -74,6 +74,34 @@ use pgtemp::PgTempDBBuilder; /// Node ID used for all test deployments. Visible in store metadata. const NODE_ID: &str = "gnd-test"; +/// Build a logger based on the `-v` verbosity flag. +/// +/// When the `GRAPH_LOG` environment variable is set it always takes precedence +/// (this is the existing graph-node convention). Otherwise: +/// +/// | Flag | Level | +/// |----------|-------| +/// | *(none)* | Off (discard) — only test pass/fail output via `println!` | +/// | `-v` | Info | +/// | `-vv` | Debug | +/// | `-vvv` | Trace | +fn make_test_logger(verbose: u8) -> Logger { + // GRAPH_LOG env var always wins — use the standard graph-node logger + // with debug enabled so GRAPH_LOG's own filtering is the sole authority. + if std::env::var("GRAPH_LOG").is_ok() { + return graph::log::logger(true); + } + + match verbose { + 0 => graph::log::discard(), + 1 => graph::log::logger_with_levels(false, None), + 2 => graph::log::logger_with_levels(true, None), + // "trace" is parsed by slog_envlogger::LogBuilder::parse() as a global + // level filter — equivalent to setting GRAPH_LOG=trace. 
+ _ => graph::log::logger_with_levels(true, Some("trace")), + } +} + /// Bundles the store infrastructure needed for test execution. /// /// Created once per test and holds the connection pools, chain store, @@ -106,7 +134,88 @@ pub(super) struct TestContext { pub(super) graphql_runner: Arc>, } -// ============ Test Execution ============ +// ============ Manifest Loading ============ + +/// Pre-computed manifest data shared across all tests in a run. +/// +/// Loaded once at the start of `run_test` and passed to each test, +/// avoiding redundant manifest parsing and noisy log output. +/// +/// All tests share the same `hash` (derived from the built manifest path). +/// `cleanup()` removes prior deployments with that hash before each test, +/// so tests MUST run sequentially. If parallelism is ever added, each test +/// will need a unique hash (e.g., by incorporating the test name). +pub(super) struct ManifestInfo { + /// The build directory containing compiled WASM, schema, and built manifest. + pub build_dir: PathBuf, + /// Network name from the manifest (e.g., "mainnet"). + pub network_name: ChainName, + /// Minimum `startBlock` across all data sources. + pub min_start_block: u64, + /// Override for on-chain block validation when startBlock > 0. + pub start_block_override: Option, + /// Deployment hash derived from the built manifest path. + pub hash: DeploymentHash, +} + +/// Load and pre-compute manifest data for the test run. +/// +/// Resolves paths relative to the manifest location, loads the built manifest, +/// extracts the network name and start block, and computes the deployment hash. +/// Called once before any tests run. +pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { + let manifest_dir = opt + .manifest + .parent() + .map(|p| p.to_path_buf()) + .unwrap_or_else(|| PathBuf::from(".")); + + let build_dir = manifest_dir.join("build"); + + let manifest_filename = opt + .manifest + .file_name() + .and_then(|s| s.to_str()) + .unwrap_or("subgraph.yaml"); + let built_manifest_path = build_dir.join(manifest_filename); + let built_manifest_path = built_manifest_path + .canonicalize() + .context("Failed to resolve built manifest path — did you run 'gnd build'?")?; + + let manifest = load_manifest(&built_manifest_path)?; + + let network_name: ChainName = extract_network_from_manifest(&manifest)?.into(); + let min_start_block = extract_start_block_from_manifest(&manifest)?; + + let start_block_override = if min_start_block > 0 { + use graph::prelude::alloy::primitives::keccak256; + let hash = keccak256((min_start_block - 1).to_be_bytes()); + ensure!( + min_start_block - 1 <= i32::MAX as u64, + "block number {} exceeds i32::MAX", + min_start_block - 1 + ); + Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32)) + } else { + None + }; + + let deployment_id = built_manifest_path.display().to_string(); + let hash = DeploymentHash::new(&deployment_id).map_err(|_| { + anyhow!( + "Failed to create deployment hash from path: {}", + deployment_id + ) + })?; + + Ok(ManifestInfo { + build_dir, + network_name, + min_start_block, + start_block_override, + hash, + }) +} /// Extract the network name (e.g., "mainnet") from the first data source in a manifest. /// @@ -148,15 +257,24 @@ fn extract_start_block_from_manifest(manifest: &Manifest) -> Result { .unwrap_or(0)) } +// ============ Test Execution ============ + /// Run a single test file end-to-end. /// /// This is the main entry point called from `mod.rs` for each test file. 
/// It creates isolated infrastructure (database, stores, chain), indexes /// the mock blocks, and checks the GraphQL assertions. /// +/// The `manifest_info` is loaded once and shared across all tests to avoid +/// redundant manifest parsing. +/// /// Returns `TestResult::Passed` if all assertions match, or `TestResult::Failed` /// with details about handler errors or assertion mismatches. -pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result { +pub async fn run_single_test( + opt: &TestOpt, + manifest_info: &ManifestInfo, + test_file: &TestFile, +) -> Result { // Empty test with no blocks and no assertions is trivially passing. if test_file.blocks.is_empty() && test_file.assertions.is_empty() { return Ok(TestResult::Passed { assertions: vec![] }); @@ -171,80 +289,22 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result 0, - // graph-node normally validates the block exists on-chain, but our test - // environment has no real chain. We provide a start_block_override to - // bypass validation, and also default test block numbering to start at - // the manifest's startBlock so blocks land in the indexed range. - let min_start_block = extract_start_block_from_manifest(&manifest)?; - // Convert test JSON blocks into graph-node's internal block format. // Default block numbering starts at the manifest's startBlock so that // test blocks without explicit numbers fall in the subgraph's indexed range. - let blocks = build_blocks_with_triggers(test_file, min_start_block)?; - - // Build a start_block_override when startBlock > 0 to bypass on-chain - // block validation (which would fail against the dummy firehose endpoint). - // This mirrors what resolve_start_block() computes: a BlockPtr for - // block (min_start_block - 1). - let start_block_override = if min_start_block > 0 { - use graph::prelude::alloy::primitives::keccak256; - let hash = keccak256((min_start_block - 1).to_be_bytes()); - ensure!( - min_start_block - 1 <= i32::MAX as u64, - "block number {} exceeds i32::MAX", - min_start_block - 1 - ); - Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32)) - } else { - None - }; + let blocks = build_blocks_with_triggers(test_file, manifest_info.min_start_block)?; // Create a temporary database for this test. The `_temp_db` handle must // be kept alive for the duration of the test — dropping it destroys the database. - let (db_url, _temp_db) = get_database_url(opt, &build_dir)?; + let (db_url, _temp_db) = get_database_url(opt, &manifest_info.build_dir)?; - let logger = graph::log::logger(false).new(o!("test" => test_file.name.clone())); + let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); // Initialize stores with the network name from the manifest. - let stores = setup_stores(&logger, &db_url, &network_name).await?; + let stores = setup_stores(&logger, &db_url, &manifest_info.network_name).await?; // Create the mock Ethereum chain that will feed our pre-built blocks. - let chain = setup_chain(&test_file.name, blocks.clone(), &stores).await?; - - // Use the built manifest path as the deployment hash, matching gnd dev's pattern. - // FileLinkResolver resolves the hash back to the filesystem path when loading. 
- let deployment_id = built_manifest_path.display().to_string(); - let hash = DeploymentHash::new(&deployment_id).map_err(|_| { - anyhow!( - "Failed to create deployment hash from path: {}", - deployment_id - ) - })?; + let chain = setup_chain(&logger, blocks.clone(), &stores).await?; // Sanitize test name for use as a subgraph name (alphanumeric + hyphens + underscores). let test_name_sanitized = test_file @@ -261,10 +321,10 @@ pub async fn run_single_test(opt: &TestOpt, test_file: &TestFile) -> Result Result { @@ -418,20 +478,9 @@ ingestor = "default" let mock_registry = Arc::new(MetricsRegistry::mock()); let node_id = NodeId::new(NODE_ID).unwrap(); - // Filter out the "Store event stream ended" error that fires during - // cleanup when pgtemp drops the database out from under the listener. - let base_logger = graph::log::logger(false); - let store_logger = Logger::root(base_logger.fuse(), o!()); - // StoreBuilder runs migrations and creates connection pools. - let store_builder = StoreBuilder::new( - &store_logger, - &node_id, - &config, - None, - mock_registry.clone(), - ) - .await; + let store_builder = + StoreBuilder::new(logger, &node_id, &config, None, mock_registry.clone()).await; let chain_head_listener = store_builder.chain_head_update_listener(); let network_identifiers: Vec = vec![network_name.clone()]; @@ -465,11 +514,10 @@ ingestor = "default" /// - `StaticBlockRefetcher`: no-op since there are no reorgs in tests /// - A dummy firehose endpoint (never actually connected to) async fn setup_chain( - test_name: &str, + logger: &Logger, blocks: Vec>, stores: &TestStores, ) -> Result> { - let logger = graph::log::logger(false).new(o!("test" => test_name.to_string())); let mock_registry = Arc::new(MetricsRegistry::mock()); let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); From 8d265360be780d20cc0a7d2768fd7035f575c843 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 15:07:22 +0200 Subject: [PATCH 26/34] gnd(test): Add integration test harness with pgtemp fixes Add gnd_test integration test suite with fixture subgraph and test cases for blocks, transfers, templates, and expected failures. Fix pgtemp Unix socket path overflow on macOS by overriding unix_socket_directories to /tmp. Reduce default pool_size to 2. 
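The socket override, roughly (a sketch assuming pgtemp's builder API,
with `with_data_dir_prefix`, `with_config_param`, `start`, and
`connection_uri` as the assumed method names; the real call site is in
runner.rs):

    // Keep the Unix socket path short enough for macOS's 104-char
    // sockaddr_un limit while the data dir stays under build/.
    let db = PgTempDBBuilder::new()
        .with_data_dir_prefix(&build_dir)
        .with_config_param("unix_socket_directories", "/tmp")
        .start();
    let url = db.connection_uri();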
--- gnd/Cargo.toml | 4 + gnd/src/commands/test/runner.rs | 8 +- .../gnd_test/subgraph/abis/ERC20.json | 222 ++++++++++++++++++ .../gnd_test/subgraph/abis/TokenFactory.json | 28 +++ .../fixtures/gnd_test/subgraph/package.json | 18 ++ .../fixtures/gnd_test/subgraph/schema.graphql | 33 +++ .../fixtures/gnd_test/subgraph/src/blocks.ts | 23 ++ .../fixtures/gnd_test/subgraph/src/factory.ts | 21 ++ .../fixtures/gnd_test/subgraph/src/token.ts | 86 +++++++ .../fixtures/gnd_test/subgraph/subgraph.yaml | 124 ++++++++++ .../gnd_test/subgraph/tests/blocks.json | 43 ++++ .../gnd_test/subgraph/tests/failing.json | 12 + .../gnd_test/subgraph/tests/templates.json | 80 +++++++ .../gnd_test/subgraph/tests/transfer.json | 65 +++++ gnd/tests/gnd_test.rs | 197 ++++++++++++++++ justfile | 12 + 16 files changed, 975 insertions(+), 1 deletion(-) create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/package.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/schema.graphql create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/src/token.ts create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json create mode 100644 gnd/tests/gnd_test.rs diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index 69137b6eb11..a4cec7e5c9b 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -15,6 +15,10 @@ path = "tests/cli_commands.rs" name = "codegen_verification" path = "tests/codegen_verification.rs" +[[test]] +name = "gnd_test" +path = "tests/gnd_test.rs" + [dependencies] # Core graph dependencies graph = { path = "../graph" } diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 62cb82176b0..4bdb6b9f00b 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -407,11 +407,17 @@ fn get_database_url(opt: &TestOpt, build_dir: &Path) -> Result<(String, Option = LazyLock::new(|| { + let temp_dir = TempDir::new().expect("Failed to create temp directory"); + let subgraph_dir = temp_dir.path().join("subgraph"); + fs::create_dir_all(&subgraph_dir).unwrap(); + + let fixture = fixture_path(); + assert!( + fixture.exists(), + "Fixture not found at {}", + fixture.display() + ); + + copy_dir_recursive(&fixture, &subgraph_dir).expect("Failed to copy fixture to temp directory"); + + // Install npm dependencies (graph-ts, graph-cli) + let npm_output = Command::new("npm") + .arg("install") + .current_dir(&subgraph_dir) + .output() + .expect("Failed to run `npm install`. 
Is npm available?"); + + assert!( + npm_output.status.success(), + "npm install failed in fixture:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&npm_output.stdout), + String::from_utf8_lossy(&npm_output.stderr), + ); + + verify_asc_available(); + + let gnd = verify_gnd_binary(); + let codegen_output = Command::new(&gnd) + .args(["codegen", "--skip-migrations"]) + .current_dir(&subgraph_dir) + .output() + .expect("Failed to run `gnd codegen`"); + + assert!( + codegen_output.status.success(), + "gnd codegen failed in fixture:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&codegen_output.stdout), + String::from_utf8_lossy(&codegen_output.stderr), + ); + + (temp_dir, subgraph_dir) +}); + +/// Get the path to the gnd binary. +fn gnd_binary_path() -> PathBuf { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + PathBuf::from(manifest_dir) + .parent() + .unwrap() + .join("target") + .join("debug") + .join("gnd") +} + +/// Verify the gnd binary exists, panic with a helpful message if not. +fn verify_gnd_binary() -> PathBuf { + let gnd_path = gnd_binary_path(); + assert!( + gnd_path.exists(), + "gnd binary not found at {}. Run `cargo build -p gnd` first.", + gnd_path.display() + ); + gnd_path +} + +/// Get the path to the gnd_test fixture subgraph. +fn fixture_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("gnd_test") + .join("subgraph") +} + +/// Assert that `asc` (AssemblyScript compiler) is available in PATH. +fn verify_asc_available() { + let output = Command::new("asc") + .arg("--version") + .output() + .expect("Failed to execute `asc --version`. Is AssemblyScript installed? Run: npm install -g assemblyscript@0.19.23"); + + assert!( + output.status.success(), + "`asc --version` failed. Install AssemblyScript: npm install -g assemblyscript@0.19.23" + ); +} + +/// Copy a directory recursively. +fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { + for entry in WalkDir::new(src).min_depth(1) { + let entry = entry?; + let relative_path = entry.path().strip_prefix(src).unwrap(); + let dest_path = dst.join(relative_path); + + if entry.file_type().is_dir() { + fs::create_dir_all(&dest_path)?; + } else { + fs::copy(entry.path(), &dest_path)?; + } + } + Ok(()) +} + +/// Run `gnd test` with the given args in the given directory. +/// Returns the Output (status, stdout, stderr). +fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { + let gnd = verify_gnd_binary(); + Command::new(&gnd) + .arg("test") + .args(args) + .current_dir(cwd) + .output() + .expect("Failed to execute gnd test") +} + +// ============================================================================ +// gnd test — run all fixture tests +// ============================================================================ + +#[test] +fn test_gnd_test_all() { + let subgraph_dir = &FIXTURE.1; + + // Run only the passing test files (exclude failing.json which is used by the negative test). 
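+    // failing.json is exercised separately by test_gnd_test_failing_assertions.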
+ let output = run_gnd_test( + &[ + "tests/transfer.json", + "tests/blocks.json", + "tests/templates.json", + ], + subgraph_dir, + ); + + assert!( + output.status.success(), + "gnd test failed with exit code: {:?}\nstdout: {}\nstderr: {}", + output.status.code(), + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} + +// ============================================================================ +// gnd test — verify failure on wrong assertions +// ============================================================================ + +#[test] +fn test_gnd_test_failing_assertions() { + let subgraph_dir = &FIXTURE.1; + + let output = run_gnd_test(&["tests/failing.json"], subgraph_dir); + + assert!( + !output.status.success(), + "gnd test should have failed for failing.json but exited with code 0\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} diff --git a/justfile b/justfile index b24b87f5138..a05ef141078 100644 --- a/justfile +++ b/justfile @@ -88,6 +88,18 @@ test-gnd-commands *EXTRA_FLAGS: cargo test {{EXTRA_FLAGS}} --package gnd --test cli_commands -- --nocapture +# Run gnd test runner tests (requires asc in PATH, uses pgtemp for PostgreSQL) +test-gnd-test *EXTRA_FLAGS: + #!/usr/bin/env bash + set -e # Exit on error + + # Build gnd binary + cargo build --bin gnd + + echo "Running gnd test runner tests" + + cargo test {{EXTRA_FLAGS}} --package gnd --test gnd_test -- --nocapture + # Clean workspace (cargo clean) clean: cargo clean From ee828d7f1419037d50e2913868901863d0dd699a Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 15:38:32 +0200 Subject: [PATCH 27/34] gnd(compiler): Look for asc binary in local if global does not exist. --- gnd/src/compiler/asc.rs | 53 ++++++++++++------- .../fixtures/gnd_test/subgraph/package.json | 3 +- gnd/tests/gnd_test.rs | 23 +++++--- 3 files changed, 53 insertions(+), 26 deletions(-) diff --git a/gnd/src/compiler/asc.rs b/gnd/src/compiler/asc.rs index 6f838b723de..d70da30322b 100644 --- a/gnd/src/compiler/asc.rs +++ b/gnd/src/compiler/asc.rs @@ -38,17 +38,19 @@ const REQUIRED_ASC_VERSION: &str = "0.19.23"; /// /// Requires asc version 0.19.23 to be installed. pub fn compile_mapping(options: &AscCompileOptions) -> Result<()> { - // Check that asc is available - if !is_asc_available() { - return Err(anyhow!( - "AssemblyScript compiler (asc) not found. Please install it with:\n \ - npm install -g assemblyscript@{REQUIRED_ASC_VERSION}" - )); - } + // Resolve the asc binary, checking global PATH and local node_modules/.bin + let asc_bin = find_asc_binary(&options.base_dir).ok_or_else(|| { + anyhow!( + "AssemblyScript compiler (asc) not found. 
Install it with:\n \ + npm install -g assemblyscript@{REQUIRED_ASC_VERSION}\n \ + or locally:\n \ + npm install --save-dev assemblyscript@{REQUIRED_ASC_VERSION}" + ) + })?; // Check version unless explicitly skipped if !options.skip_version_check { - let version = get_asc_version()?; + let version = get_asc_version(&asc_bin)?; if version != REQUIRED_ASC_VERSION { return Err(anyhow!( "AssemblyScript compiler version mismatch: found {}, required {}.\n \ @@ -76,7 +78,7 @@ pub fn compile_mapping(options: &AscCompileOptions) -> Result<()> { .unwrap_or(&options.output_file); // Build the asc command - let mut cmd = Command::new("asc"); + let mut cmd = Command::new(&asc_bin); // Add compiler flags matching graph-cli behavior cmd.arg("--explicitStart") @@ -177,18 +179,31 @@ pub fn find_graph_ts(source_dir: &Path) -> Result<(Vec, PathBuf)> { Ok((lib_dirs, global_file)) } -/// Check if the asc compiler is available. -fn is_asc_available() -> bool { - Command::new("asc") +/// Find the `asc` binary by checking the global PATH first, then the project's +/// root `node_modules/.bin/asc`. +fn find_asc_binary(base_dir: &Path) -> Option { + // Check global PATH first + if Command::new("asc") .arg("--version") .output() .map(|o| o.status.success()) .unwrap_or(false) + { + return Some(PathBuf::from("asc")); + } + + // Backward compatibility with graph-cli: check local node_modules/.bin/asc + let local_asc = base_dir.join("node_modules").join(".bin").join("asc"); + if local_asc.exists() { + return Some(local_asc); + } + + None } /// Get the asc compiler version. -fn get_asc_version() -> Result { - let output = Command::new("asc") +fn get_asc_version(asc_bin: &Path) -> Result { + let output = Command::new(asc_bin) .arg("--version") .output() .context("Failed to execute asc --version")?; @@ -244,11 +259,13 @@ mod tests { #[test] fn test_asc_version_check() { // Skip if asc is not installed - if !is_asc_available() { - return; - } + let temp_dir = TempDir::new().unwrap(); + let asc_bin = match find_asc_binary(temp_dir.path()) { + Some(bin) => bin, + None => return, + }; - let version = get_asc_version().unwrap(); + let version = get_asc_version(&asc_bin).unwrap(); // Version should be a semver-like string (e.g., "0.19.23") assert!( version.split('.').count() >= 2, diff --git a/gnd/tests/fixtures/gnd_test/subgraph/package.json b/gnd/tests/fixtures/gnd_test/subgraph/package.json index a66f9b5ea72..96118f38494 100644 --- a/gnd/tests/fixtures/gnd_test/subgraph/package.json +++ b/gnd/tests/fixtures/gnd_test/subgraph/package.json @@ -13,6 +13,7 @@ }, "devDependencies": { "@graphprotocol/graph-cli": "0.98.1", - "@graphprotocol/graph-ts": "0.38.2" + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" } } diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs index 9b242cc4209..e3206164c7a 100644 --- a/gnd/tests/gnd_test.rs +++ b/gnd/tests/gnd_test.rs @@ -60,7 +60,7 @@ static FIXTURE: LazyLock<(TempDir, PathBuf)> = LazyLock::new(|| { String::from_utf8_lossy(&npm_output.stderr), ); - verify_asc_available(); + verify_asc_available(&subgraph_dir); let gnd = verify_gnd_binary(); let codegen_output = Command::new(&gnd) @@ -110,16 +110,25 @@ fn fixture_path() -> PathBuf { .join("subgraph") } -/// Assert that `asc` (AssemblyScript compiler) is available in PATH. -fn verify_asc_available() { - let output = Command::new("asc") +/// Assert that `asc` (AssemblyScript compiler) is available in PATH or in local node_modules. 
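+/// Uses the same lookup order as `find_asc_binary` in gnd/src/compiler/asc.rs.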
+fn verify_asc_available(subgraph_dir: &Path) { + // Check global PATH first + if Command::new("asc") .arg("--version") .output() - .expect("Failed to execute `asc --version`. Is AssemblyScript installed? Run: npm install -g assemblyscript@0.19.23"); + .map(|o| o.status.success()) + .unwrap_or(false) + { + return; + } + // Fall back to local node_modules/.bin/asc + let local_asc = subgraph_dir.join("node_modules").join(".bin").join("asc"); assert!( - output.status.success(), - "`asc --version` failed. Install AssemblyScript: npm install -g assemblyscript@0.19.23" + local_asc.exists(), + "asc compiler not found globally or at {}. \ + Install it with: npm install -g assemblyscript@0.19.23", + local_asc.display() ); } From 9553e7793060b34fb317481928f8b99996e86381 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 15:55:08 +0200 Subject: [PATCH 28/34] gnd(test): Use --postgres-url for tests --- gnd/tests/gnd_test.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs index e3206164c7a..28c56300ff3 100644 --- a/gnd/tests/gnd_test.rs +++ b/gnd/tests/gnd_test.rs @@ -152,9 +152,16 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { /// Returns the Output (status, stdout, stderr). fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { let gnd = verify_gnd_binary(); - Command::new(&gnd) - .arg("test") - .args(args) + let mut cmd = Command::new(&gnd); + cmd.arg("test"); + + // When a database URL is provided via env var (e.g. in CI), pass it through + // to skip pgtemp which may not be available. + if let Ok(db_url) = std::env::var("THEGRAPH_STORE_POSTGRES_DIESEL_URL") { + cmd.arg("--postgres-url").arg(db_url); + } + + cmd.args(args) .current_dir(cwd) .output() .expect("Failed to execute gnd test") From e64f1ba871cf164bf0adb5df10f76e46d96ac6b2 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 18:41:14 +0200 Subject: [PATCH 29/34] gnd(test): Fix test cleanup for --postgres-url --- gnd/src/commands/test/runner.rs | 62 ++++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 17 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 4bdb6b9f00b..451daf7e5cc 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -156,6 +156,11 @@ pub(super) struct ManifestInfo { pub start_block_override: Option, /// Deployment hash derived from the built manifest path. pub hash: DeploymentHash, + /// Subgraph name derived from the manifest's root directory (e.g., "test/my-subgraph"). + /// Fixed across all tests so that `cleanup` can always find and remove the + /// previous test's entry — per-test names left dangling FK references that + /// prevented `drop_chain` from clearing the chain head. + pub subgraph_name: SubgraphName, } /// Load and pre-compute manifest data for the test run. @@ -208,12 +213,27 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { ) })?; + // Derive subgraph name from the root directory (e.g., "my-subgraph" → "test/my-subgraph"). + // Sanitize to alphanumeric + hyphens + underscores for SubgraphName compatibility. 
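+    // Illustrative: a root directory named "my.subgraph" becomes the
+    // subgraph name "test/mysubgraph".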
+ let root_dir_name = manifest_dir + .canonicalize() + .unwrap_or(manifest_dir.clone()) + .file_name() + .and_then(|s| s.to_str()) + .unwrap_or("gnd-test") + .chars() + .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '_') + .collect::(); + let subgraph_name = + SubgraphName::new(format!("test/{}", root_dir_name)).map_err(|e| anyhow!("{}", e))?; + Ok(ManifestInfo { build_dir, network_name, min_start_block, start_block_override, hash, + subgraph_name, }) } @@ -301,20 +321,18 @@ pub async fn run_single_test( let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); // Initialize stores with the network name from the manifest. - let stores = setup_stores(&logger, &db_url, &manifest_info.network_name).await?; + let stores = setup_stores( + &logger, + &db_url, + &manifest_info.network_name, + &manifest_info.subgraph_name, + &manifest_info.hash, + ) + .await?; // Create the mock Ethereum chain that will feed our pre-built blocks. let chain = setup_chain(&logger, blocks.clone(), &stores).await?; - // Sanitize test name for use as a subgraph name (alphanumeric + hyphens + underscores). - let test_name_sanitized = test_file - .name - .chars() - .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '_') - .collect::(); - let subgraph_name = - SubgraphName::new(format!("test/{}", test_name_sanitized)).map_err(|e| anyhow!("{}", e))?; - // Wire up all graph-node components (instance manager, provider, registrar, etc.) // and deploy the subgraph. let ctx = setup_context( @@ -323,7 +341,7 @@ pub async fn run_single_test( &chain, &manifest_info.build_dir, manifest_info.hash.clone(), - subgraph_name.clone(), + manifest_info.subgraph_name.clone(), manifest_info.start_block_override.clone(), ) .await?; @@ -456,6 +474,8 @@ async fn setup_stores( logger: &Logger, db_url: &str, network_name: &ChainName, + subgraph_name: &SubgraphName, + hash: &DeploymentHash, ) -> Result { // Minimal graph-node config: one primary shard, no chain providers. // The chain→shard mapping defaults to "primary" in StoreBuilder::make_store, @@ -492,14 +512,22 @@ ingestor = "default" let network_identifiers: Vec = vec![network_name.clone()]; let network_store = store_builder.network_store(network_identifiers).await; + // Clean up any leftover state from a previous run on this persistent database. + // Order matters: deployments must be removed before the chain can be dropped, + // because deployment_schemas has a FK constraint on the chains table. + let subgraph_store = network_store.subgraph_store(); + cleanup(&subgraph_store, subgraph_name, hash).await.ok(); + + let block_store = network_store.block_store(); + let _ = block_store.drop_chain(network_name).await; + // Synthetic chain identifier — net_version "1" with zero genesis hash. let ident = ChainIdentifier { net_version: "1".into(), genesis_block_hash: graph::prelude::alloy::primitives::B256::ZERO.into(), }; - let chain_store = network_store - .block_store() + let chain_store = block_store .create_chain_store(network_name, ident) .await .context("Failed to create chain store")?; @@ -643,9 +671,6 @@ async fn setup_context( let subgraph_store = stores.network_store.subgraph_store(); - // Remove any leftover deployment from a previous test run (idempotent). - cleanup(&subgraph_store, &subgraph_name, &hash).await.ok(); - // Map the network name to our mock chain so graph-node routes triggers correctly. 
let mut blockchain_map = BlockchainMap::new(); blockchain_map.insert(stores.network_name.clone(), chain.clone()); @@ -784,7 +809,10 @@ async fn cleanup( // Ignore errors - the subgraph might not exist on first run let _ = subgraph_store.remove_subgraph(name.clone()).await; - for locator in locators { + for locator in &locators { + // Unassign the deployment from its node first — remove_deployment + // silently skips deletion if the deployment is still assigned. + let _ = SubgraphStoreTrait::unassign_subgraph(subgraph_store, locator).await; subgraph_store.remove_deployment(locator.id.into()).await?; } From 117e2bbc9ab06199a1640845c797f307ff42477b Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 21:30:26 +0200 Subject: [PATCH 30/34] gnd(test): Replace TempPgHandle with TestDatabase enum Makes the two database lifecycle paths explicit and self-documenting. `TestDatabase::Temporary` vs `TestDatabase::Persistent` (--postgres-url, needs cleanup) replaces the opaque `Option`. Cleanup in `setup_stores` is now gated on `db.needs_cleanup()` instead of running unconditionally. --- gnd/src/commands/test/runner.rs | 94 ++++++++++++++++++++++----------- gnd/tests/gnd_test.rs | 22 ++++---- 2 files changed, 77 insertions(+), 39 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 451daf7e5cc..5d01c984daa 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -2,7 +2,8 @@ //! //! This is the core of `gnd test`. For each test file, it: //! -//! 1. Creates a temporary PostgreSQL database (pgtemp) for complete test isolation +//! 1. Creates a test database (`TestDatabase::Temporary` via pgtemp, or +//! `TestDatabase::Persistent` via `--postgres-url`) for test isolation //! 2. Initializes graph-node stores (entity storage, block storage, chain store) //! 3. Constructs a mock Ethereum chain that feeds pre-defined blocks //! 4. Deploys the subgraph and starts the indexer @@ -38,7 +39,9 @@ use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier}; use graph::cheap_clone::CheapClone; use graph::components::link_resolver::{ArweaveClient, FileLinkResolver}; use graph::components::metrics::MetricsRegistry; -use graph::components::network_provider::{ChainName, ProviderCheckStrategy, ProviderManager}; +use graph::components::network_provider::{ + AmpChainNames, ChainName, ProviderCheckStrategy, ProviderManager, +}; use graph::components::store::DeploymentLocator; use graph::components::subgraph::{Settings, SubgraphInstanceManager as _}; use graph::data::graphql::load_manager::LoadManager; @@ -314,16 +317,16 @@ pub async fn run_single_test( // test blocks without explicit numbers fall in the subgraph's indexed range. let blocks = build_blocks_with_triggers(test_file, manifest_info.min_start_block)?; - // Create a temporary database for this test. The `_temp_db` handle must - // be kept alive for the duration of the test — dropping it destroys the database. - let (db_url, _temp_db) = get_database_url(opt, &manifest_info.build_dir)?; + // Create the database for this test. For pgtemp, the `db` value must + // stay alive for the duration of the test — dropping it destroys the database. + let db = create_test_database(opt, &manifest_info.build_dir)?; let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); // Initialize stores with the network name from the manifest. 
let stores = setup_stores( &logger, - &db_url, + &db, &manifest_info.network_name, &manifest_info.subgraph_name, &manifest_info.hash, @@ -403,17 +406,17 @@ pub async fn run_single_test( result } -/// Get a PostgreSQL connection URL for the test. +/// Create the database for this test run. /// -/// If `--postgres-url` was provided, uses that directly. -/// Otherwise, on Unix, creates a temporary database via pgtemp in the build -/// directory (matching `gnd dev`'s pattern). The database is automatically -/// destroyed when `TempPgHandle` is dropped. +/// If `--postgres-url` was provided, returns a `Persistent` database that +/// requires cleanup between tests. Otherwise, on Unix, creates a `Temporary` +/// pgtemp database in the build directory (matching `gnd dev`'s pattern) — +/// dropped automatically when the returned value goes out of scope. /// /// On non-Unix systems, `--postgres-url` is required. -fn get_database_url(opt: &TestOpt, build_dir: &Path) -> Result<(String, Option)> { +fn create_test_database(opt: &TestOpt, build_dir: &Path) -> Result { if let Some(url) = &opt.postgres_url { - return Ok((url.clone(), None)); + return Ok(TestDatabase::Persistent { url: url.clone() }); } #[cfg(unix)] @@ -439,7 +442,7 @@ fn get_database_url(opt: &TestOpt, build_dir: &Path) -> Result<(String, Option Result<(String, Option &str { + match self { + #[cfg(unix)] + Self::Temporary { url, .. } => url, + Self::Persistent { url } => url, + } + } + + /// Persistent databases accumulate state across test runs and need + /// explicit cleanup (remove prior deployments, drop chains) before + /// each test. Temporary databases start fresh — no cleanup needed. + fn needs_cleanup(&self) -> bool { + match self { + #[cfg(unix)] + Self::Temporary { .. } => false, + Self::Persistent { .. } => true, + } + } +} /// Initialize graph-node stores from a database URL. /// @@ -472,7 +502,7 @@ struct TempPgHandle; /// error that occurs when pgtemp is dropped during cleanup. async fn setup_stores( logger: &Logger, - db_url: &str, + db: &TestDatabase, network_name: &ChainName, subgraph_name: &SubgraphName, hash: &DeploymentHash, @@ -495,7 +525,7 @@ indexers = [ "default" ] [chains] ingestor = "default" "#, - db_url + db.url() ); let config = Config::from_str(&config_str, "default") @@ -512,14 +542,17 @@ ingestor = "default" let network_identifiers: Vec = vec![network_name.clone()]; let network_store = store_builder.network_store(network_identifiers).await; - // Clean up any leftover state from a previous run on this persistent database. - // Order matters: deployments must be removed before the chain can be dropped, - // because deployment_schemas has a FK constraint on the chains table. - let subgraph_store = network_store.subgraph_store(); - cleanup(&subgraph_store, subgraph_name, hash).await.ok(); - + // Persistent databases accumulate state across test runs and need cleanup. + // Temporary (pgtemp) databases start fresh — no cleanup needed. let block_store = network_store.block_store(); - let _ = block_store.drop_chain(network_name).await; + if db.needs_cleanup() { + // Order matters: deployments must be removed before the chain can be dropped, + // because deployment_schemas has a FK constraint on the chains table. + let subgraph_store = network_store.subgraph_store(); + cleanup(&subgraph_store, subgraph_name, hash).await.ok(); + + let _ = block_store.drop_chain(network_name).await; + } // Synthetic chain identifier — net_version "1" with zero genesis hash. 
let ident = ChainIdentifier { @@ -767,6 +800,7 @@ async fn setup_context( node_id.clone(), SubgraphVersionSwitchingMode::Instant, Arc::new(Settings::default()), + Arc::new(AmpChainNames::default()), )); // Register the subgraph name (e.g., "test/TransferCreatesEntity"). diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs index 28c56300ff3..0aebde3793b 100644 --- a/gnd/tests/gnd_test.rs +++ b/gnd/tests/gnd_test.rs @@ -21,18 +21,22 @@ //! ```bash //! just test-gnd-test //! ``` +//! +//! Tests run with `--test-threads=1` to avoid races when sharing a Postgres +//! instance via `--postgres-url` (CI). With pgtemp (default) each test gets +//! its own isolated database, but serial execution keeps things simple. use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; -use std::sync::LazyLock; use tempfile::TempDir; use walkdir::WalkDir; -/// Shared fixture: copied once, npm-installed once, codegen'd once. -/// The `TempDir` is kept alive for the entire test binary lifetime. -static FIXTURE: LazyLock<(TempDir, PathBuf)> = LazyLock::new(|| { +/// Copy the fixture subgraph into a fresh temp directory, install npm +/// dependencies, and run `gnd codegen`. Returns the temp dir handle (to +/// keep it alive) and the path to the prepared subgraph directory. +fn setup_fixture() -> (TempDir, PathBuf) { let temp_dir = TempDir::new().expect("Failed to create temp directory"); let subgraph_dir = temp_dir.path().join("subgraph"); fs::create_dir_all(&subgraph_dir).unwrap(); @@ -77,7 +81,7 @@ static FIXTURE: LazyLock<(TempDir, PathBuf)> = LazyLock::new(|| { ); (temp_dir, subgraph_dir) -}); +} /// Get the path to the gnd binary. fn gnd_binary_path() -> PathBuf { @@ -173,7 +177,7 @@ fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { #[test] fn test_gnd_test_all() { - let subgraph_dir = &FIXTURE.1; + let (_temp_dir, subgraph_dir) = setup_fixture(); // Run only the passing test files (exclude failing.json which is used by the negative test). let output = run_gnd_test( @@ -182,7 +186,7 @@ fn test_gnd_test_all() { "tests/blocks.json", "tests/templates.json", ], - subgraph_dir, + &subgraph_dir, ); assert!( @@ -200,9 +204,9 @@ fn test_gnd_test_all() { #[test] fn test_gnd_test_failing_assertions() { - let subgraph_dir = &FIXTURE.1; + let (_temp_dir, subgraph_dir) = setup_fixture(); - let output = run_gnd_test(&["tests/failing.json"], subgraph_dir); + let output = run_gnd_test(&["tests/failing.json"], &subgraph_dir); assert!( !output.status.success(), From c9aa0dce9c9989c5239e409bea8c9cf57d6e70bd Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 21:58:40 +0200 Subject: [PATCH 31/34] gnd(test): Clean up persistent DB state after each test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using --postgres-url, cleanup only ran at the start of each test (to remove stale state from a previous run). The last test's deployment was left in the DB, which broke unrelated unit test suites calling remove_all_subgraphs_for_test_use_only() — they don't set GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION, so parsing the file-path deployment hash fails. Add post-test cleanup for persistent databases, mirroring the pre-test cleanup. Pre-test handles interrupted runs; post-test handles the normal case. Together they keep the DB clean regardless of how the run ends. 
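The resulting shape, as a sketch (names as in the diff below):

    // End of run_single_test: persistent databases only, since pgtemp
    // databases are dropped wholesale when `db` goes out of scope.
    if db.needs_cleanup() {
        cleanup(&ctx.store, &manifest_info.subgraph_name, &manifest_info.hash)
            .await
            .ok(); // best-effort, mirrors the pre-test cleanup
    }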
--- gnd/src/commands/test/runner.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 5d01c984daa..570ef484107 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -403,6 +403,21 @@ pub async fn run_single_test( .stop_subgraph(ctx.deployment.clone()) .await; + // For persistent databases, clean up the deployment after the test so the + // database is left in a clean state. Without this, the last test's deployment + // (which uses a file path as its hash) remains in the DB and breaks unrelated + // unit test suites that call remove_all_subgraphs_for_test_use_only(), since + // they don't set GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION. + if db.needs_cleanup() { + cleanup( + &ctx.store, + &manifest_info.subgraph_name, + &manifest_info.hash, + ) + .await + .ok(); + } + result } @@ -544,6 +559,11 @@ ingestor = "default" // Persistent databases accumulate state across test runs and need cleanup. // Temporary (pgtemp) databases start fresh — no cleanup needed. + // + // Pre-test cleanup: removes stale state left by a previously interrupted run. + // Each test also runs post-test cleanup (at the end of run_single_test) for + // the normal case. Together they ensure the DB is always clean before and + // after each test, even if a previous run was interrupted mid-way. let block_store = network_store.block_store(); if db.needs_cleanup() { // Order matters: deployments must be removed before the chain can be dropped, From cbfbeccc5b6e6a053caeb91bc274dba8350ad80c Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 22:59:47 +0200 Subject: [PATCH 32/34] gnd(test): Use Qm-prefixed SHA-1 hash as deployment hash MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each test run now computes a fake-but-valid DeploymentHash as "Qm" + hex(sha1(manifest_path + seed)) where seed is the Unix epoch in milliseconds. This: - Passes DeploymentHash validation without bypassing it - Produces a unique hash and subgraph name per run, so sequential runs never conflict in the store - Removes the pre-test cleanup (it would never match a fresh hash) - Registers the hash as a FileLinkResolver alias so clone_for_manifest can resolve it to the real manifest path - Reuses the existing sha1 dep — no new dependencies --- gnd/src/commands/test/runner.rs | 135 +++++++++++++++----------------- 1 file changed, 64 insertions(+), 71 deletions(-) diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 570ef484107..f4bac79f69a 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -69,7 +69,7 @@ use graph_store_postgres::{ChainHeadUpdateListener, ChainStore, Store, SubgraphS use std::marker::PhantomData; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; -use std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; #[cfg(unix)] use pgtemp::PgTempDBBuilder; @@ -144,28 +144,48 @@ pub(super) struct TestContext { /// Loaded once at the start of `run_test` and passed to each test, /// avoiding redundant manifest parsing and noisy log output. /// -/// All tests share the same `hash` (derived from the built manifest path). -/// `cleanup()` removes prior deployments with that hash before each test, -/// so tests MUST run sequentially. If parallelism is ever added, each test -/// will need a unique hash (e.g., by incorporating the test name). 
+/// All tests share the same `hash` and `subgraph_name` (derived from the built +/// manifest path and a per-run seed), which are unique across runs. This means +/// tests MUST still run sequentially within a single `gnd test` invocation, but +/// sequential `gnd test` invocations never conflict with each other in the store. pub(super) struct ManifestInfo { /// The build directory containing compiled WASM, schema, and built manifest. pub build_dir: PathBuf, + /// Canonical path to the built manifest file (e.g., `build/subgraph.yaml`). + /// Registered as an alias for `hash` in `FileLinkResolver` so that + /// `clone_for_manifest` can resolve the Qm hash to a real filesystem path. + pub manifest_path: PathBuf, /// Network name from the manifest (e.g., "mainnet"). pub network_name: ChainName, /// Minimum `startBlock` across all data sources. pub min_start_block: u64, /// Override for on-chain block validation when startBlock > 0. pub start_block_override: Option, - /// Deployment hash derived from the built manifest path. + /// Deployment hash derived from the built manifest path and a per-run seed. + /// Unique per run so that concurrent and sequential runs never conflict. pub hash: DeploymentHash, - /// Subgraph name derived from the manifest's root directory (e.g., "test/my-subgraph"). - /// Fixed across all tests so that `cleanup` can always find and remove the - /// previous test's entry — per-test names left dangling FK references that - /// prevented `drop_chain` from clearing the chain head. + /// Subgraph name derived from the manifest's root directory with a per-run seed suffix + /// (e.g., "test/my-subgraph-1739800000000"). Unique per run for the same reason. pub subgraph_name: SubgraphName, } +/// Compute a `DeploymentHash` for a local test run from a filesystem path and a seed. +/// +/// Produces `"Qm" + hex(sha1(path + '\0' + seed))` — 42 alphanumeric characters that +/// pass `DeploymentHash` validation. Not a real IPFS CIDv0, but visually consistent +/// with one and requires no additional dependencies (`sha1` is already used by `gnd build`). +/// +/// The `seed` (typically the current Unix epoch in milliseconds) makes each run produce a +/// distinct hash so sequential or concurrent test runs never collide in the store. +fn deployment_hash_from_path_and_seed(path: &Path, seed: u128) -> Result { + use sha1::{Digest, Sha1}; + + let input = format!("{}\0{}", path.display(), seed); + let digest = Sha1::digest(input.as_bytes()); + let qm = format!("Qm{:x}", digest); + DeploymentHash::new(qm).map_err(|e| anyhow!("Failed to create deployment hash: {}", e)) +} + /// Load and pre-compute manifest data for the test run. /// /// Resolves paths relative to the manifest location, loads the built manifest, @@ -208,15 +228,16 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { None }; - let deployment_id = built_manifest_path.display().to_string(); - let hash = DeploymentHash::new(&deployment_id).map_err(|_| { - anyhow!( - "Failed to create deployment hash from path: {}", - deployment_id - ) - })?; + // Use Unix epoch millis as a per-run seed so each invocation gets a unique + // deployment hash and subgraph name, avoiding conflicts with previous runs. + let seed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis(); - // Derive subgraph name from the root directory (e.g., "my-subgraph" → "test/my-subgraph"). 
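+    // Illustrative: runs started even 1ms apart get distinct seeds, hence
+    // distinct sha1 digests and distinct Qm hashes.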
+ let hash = deployment_hash_from_path_and_seed(&built_manifest_path, seed)?; + + // Derive subgraph name from the root directory (e.g., "my-subgraph" → "test/my-subgraph-"). // Sanitize to alphanumeric + hyphens + underscores for SubgraphName compatibility. let root_dir_name = manifest_dir .canonicalize() @@ -227,11 +248,12 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { .chars() .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '_') .collect::(); - let subgraph_name = - SubgraphName::new(format!("test/{}", root_dir_name)).map_err(|e| anyhow!("{}", e))?; + let subgraph_name = SubgraphName::new(format!("test/{}-{}", root_dir_name, seed)) + .map_err(|e| anyhow!("{}", e))?; Ok(ManifestInfo { build_dir, + manifest_path: built_manifest_path, network_name, min_start_block, start_block_override, @@ -324,30 +346,14 @@ pub async fn run_single_test( let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); // Initialize stores with the network name from the manifest. - let stores = setup_stores( - &logger, - &db, - &manifest_info.network_name, - &manifest_info.subgraph_name, - &manifest_info.hash, - ) - .await?; + let stores = setup_stores(&logger, &db, &manifest_info.network_name).await?; // Create the mock Ethereum chain that will feed our pre-built blocks. let chain = setup_chain(&logger, blocks.clone(), &stores).await?; // Wire up all graph-node components (instance manager, provider, registrar, etc.) // and deploy the subgraph. - let ctx = setup_context( - &logger, - &stores, - &chain, - &manifest_info.build_dir, - manifest_info.hash.clone(), - manifest_info.subgraph_name.clone(), - manifest_info.start_block_override.clone(), - ) - .await?; + let ctx = setup_context(&logger, &stores, &chain, manifest_info).await?; // Populate eth_call cache with mock responses before starting indexer. // This ensures handlers can successfully retrieve mocked contract call results. @@ -404,10 +410,9 @@ pub async fn run_single_test( .await; // For persistent databases, clean up the deployment after the test so the - // database is left in a clean state. Without this, the last test's deployment - // (which uses a file path as its hash) remains in the DB and breaks unrelated - // unit test suites that call remove_all_subgraphs_for_test_use_only(), since - // they don't set GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION. + // database is left in a clean state. Each run generates a unique hash and + // subgraph name (via the seed), so pre-test cleanup is not needed — only + // post-test cleanup of the current run's deployment. if db.needs_cleanup() { cleanup( &ctx.store, @@ -513,14 +518,10 @@ impl TestDatabase { /// - A `StoreBuilder` that runs database migrations and creates connection pools /// - A chain store for the test chain with a synthetic genesis block (hash=0x0) /// -/// Uses a filtered logger to suppress the expected "Store event stream ended" -/// error that occurs when pgtemp is dropped during cleanup. async fn setup_stores( logger: &Logger, db: &TestDatabase, network_name: &ChainName, - subgraph_name: &SubgraphName, - hash: &DeploymentHash, ) -> Result { // Minimal graph-node config: one primary shard, no chain providers. // The chain→shard mapping defaults to "primary" in StoreBuilder::make_store, @@ -557,22 +558,7 @@ ingestor = "default" let network_identifiers: Vec = vec![network_name.clone()]; let network_store = store_builder.network_store(network_identifiers).await; - // Persistent databases accumulate state across test runs and need cleanup. 
- // Temporary (pgtemp) databases start fresh — no cleanup needed. - // - // Pre-test cleanup: removes stale state left by a previously interrupted run. - // Each test also runs post-test cleanup (at the end of run_single_test) for - // the normal case. Together they ensure the DB is always clean before and - // after each test, even if a previous run was interrupted mid-way. let block_store = network_store.block_store(); - if db.needs_cleanup() { - // Order matters: deployments must be removed before the chain can be dropped, - // because deployment_schemas has a FK constraint on the chains table. - let subgraph_store = network_store.subgraph_store(); - cleanup(&subgraph_store, subgraph_name, hash).await.ok(); - - let _ = block_store.drop_chain(network_name).await; - } // Synthetic chain identifier — net_version "1" with zero genesis hash. let ident = ChainIdentifier { @@ -712,11 +698,14 @@ async fn setup_context( logger: &Logger, stores: &TestStores, chain: &Arc, - build_dir: &Path, - hash: DeploymentHash, - subgraph_name: SubgraphName, - start_block_override: Option, + manifest_info: &ManifestInfo, ) -> Result { + let build_dir = &manifest_info.build_dir; + let manifest_path = &manifest_info.manifest_path; + let hash = manifest_info.hash.clone(); + let subgraph_name = manifest_info.subgraph_name.clone(); + let start_block_override = manifest_info.start_block_override.clone(); + let env_vars = Arc::new(EnvVars::from_env().unwrap_or_default()); let mock_registry = Arc::new(MetricsRegistry::mock()); let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); @@ -730,9 +719,14 @@ async fn setup_context( let blockchain_map = Arc::new(blockchain_map); // FileLinkResolver loads the manifest and WASM from the build directory - // instead of fetching from IPFS. This matches gnd dev's approach. - let link_resolver: Arc = - Arc::new(FileLinkResolver::with_base_dir(build_dir)); + // instead of fetching from IPFS. The alias maps the Qm deployment hash to the + // actual manifest path so that clone_for_manifest can resolve it without + // treating the hash as a filesystem path. + let aliases = + std::collections::HashMap::from([(hash.to_string(), manifest_path.to_path_buf())]); + let link_resolver: Arc = Arc::new( + FileLinkResolver::new(Some(build_dir.to_path_buf()), aliases), + ); // IPFS client is required by the instance manager constructor but not used // for manifest loading (FileLinkResolver handles that). @@ -849,10 +843,9 @@ async fn setup_context( }) } -/// Remove a previous subgraph deployment and its data. +/// Remove a subgraph deployment and its data after a test run. /// -/// Called before each test to ensure a clean slate. Errors are ignored -/// (the deployment might not exist on first run). +/// Errors are ignored — the deployment is removed on a best-effort basis. 
async fn cleanup( subgraph_store: &SubgraphStore, name: &SubgraphName, From e95499313093420bacaf74022019719977f6c79b Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 23:09:23 +0200 Subject: [PATCH 33/34] gnd(test): Fix stale docs after pre-test cleanup removal --- gnd/src/commands/test/README.md | 2 +- gnd/src/commands/test/runner.rs | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/gnd/src/commands/test/README.md b/gnd/src/commands/test/README.md index af6fe8fd1f0..4ddf487fbfa 100644 --- a/gnd/src/commands/test/README.md +++ b/gnd/src/commands/test/README.md @@ -687,7 +687,7 @@ GraphQL queries → Assertions **Key design principles:** -- **Fresh database per test:** Each test gets an isolated pgtemp database, automatically dropped on completion +- **Isolated database per test:** Each test gets a pgtemp database dropped on completion (default), or a shared persistent database with post-test cleanup (`--postgres-url`) - **Real WASM runtime:** Uses `EthereumRuntimeAdapterBuilder` with real `ethereum.call` host function - **Pre-populated call cache:** `eth_call` responses are cached before indexing starts - **No IPFS for manifest:** Uses `FileLinkResolver` to load manifest/WASM from build directory diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index f4bac79f69a..1decb9b0b07 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -19,7 +19,8 @@ //! RPC endpoint. //! //! This approach follows the same pattern as `gnd dev`, which also uses -//! `FileLinkResolver` and filesystem-based deployment hashes instead of IPFS. +//! `FileLinkResolver` to load the manifest and WASM from the build directory +//! instead of fetching from IPFS. //! //! Noop/stub adapters (see [`super::noop`]) satisfy the `Chain` constructor's //! trait bounds without making real network calls. @@ -500,8 +501,8 @@ impl TestDatabase { } /// Persistent databases accumulate state across test runs and need - /// explicit cleanup (remove prior deployments, drop chains) before - /// each test. Temporary databases start fresh — no cleanup needed. + /// explicit post-test cleanup to remove each run's deployment. + /// Temporary databases are dropped automatically — no cleanup needed. fn needs_cleanup(&self) -> bool { match self { #[cfg(unix)] @@ -687,13 +688,12 @@ async fn setup_chain( /// Wire up all graph-node components and deploy the subgraph. /// /// This mirrors what `gnd dev` does via the launcher, but assembled directly: -/// 1. Clean up any leftover deployment from a previous run -/// 2. Create blockchain map (just our mock chain) -/// 3. Set up link resolver (FileLinkResolver for local filesystem) -/// 4. Create the subgraph instance manager (WASM runtime, trigger processing) -/// 5. Create the subgraph provider (lifecycle management) -/// 6. Create the GraphQL runner (for assertions) -/// 7. Register and deploy the subgraph via the registrar +/// 1. Create blockchain map (just our mock chain) +/// 2. Set up link resolver (FileLinkResolver for local filesystem, with a hash alias) +/// 3. Create the subgraph instance manager (WASM runtime, trigger processing) +/// 4. Create the subgraph provider (lifecycle management) +/// 5. Create the GraphQL runner (for assertions) +/// 6. 
Register and deploy the subgraph via the registrar async fn setup_context( logger: &Logger, stores: &TestStores, From de385dc264b3284d45083fc728d3668f8a92e801 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Tue, 17 Feb 2026 23:43:15 +0200 Subject: [PATCH 34/34] gnd(test): Rename and move README into docs/ --- gnd/src/commands/test/README.md => docs/gnd-test.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename gnd/src/commands/test/README.md => docs/gnd-test.md (99%) diff --git a/gnd/src/commands/test/README.md b/docs/gnd-test.md similarity index 99% rename from gnd/src/commands/test/README.md rename to docs/gnd-test.md index 4ddf487fbfa..b9f126b8ae9 100644 --- a/gnd/src/commands/test/README.md +++ b/docs/gnd-test.md @@ -582,7 +582,7 @@ my-subgraph/ | Log events | ✅ Supported | | Block handlers (all filters) | ✅ Supported | | eth_call mocking | ✅ Supported | -| Dynamic/template data sources | (Untested) +| Dynamic/template data sources | ✅ Supported | | Transaction receipts (`receipt: true`) | ❌ Not implemented — handlers get `null` | | File data sources / IPFS mocking | ❌ Not implemented | | Call triggers (traces) | ❌ Not implemented |