diff --git a/Cargo.lock b/Cargo.lock
index 77ecb11e1e8..fb4e71112c4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3645,6 +3645,7 @@ version = "0.41.2"
 dependencies = [
  "Inflector",
  "anyhow",
+ "async-trait",
  "clap",
  "clap_complete",
  "console 0.16.2",
@@ -3654,8 +3655,11 @@ dependencies = [
  "graph",
  "graph-chain-ethereum",
  "graph-core",
+ "graph-graphql",
  "graph-node",
+ "graph-store-postgres",
  "graphql-tools",
+ "hex",
  "indicatif",
  "inquire",
  "lazy_static",
diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml
index 580e51a7f3f..a4cec7e5c9b 100644
--- a/gnd/Cargo.toml
+++ b/gnd/Cargo.toml
@@ -15,12 +15,22 @@ path = "tests/cli_commands.rs"
 name = "codegen_verification"
 path = "tests/codegen_verification.rs"

+[[test]]
+name = "gnd_test"
+path = "tests/gnd_test.rs"
+
 [dependencies]
 # Core graph dependencies
 graph = { path = "../graph" }
 graph-chain-ethereum = { path = "../chain/ethereum" }
 graph-core = { path = "../core" }
 graph-node = { path = "../node" }
+graph-graphql = { path = "../graphql" }
+graph-store-postgres = { path = "../store/postgres" }
+
+# Test command dependencies
+hex = "0.4"
+async-trait = { workspace = true }

 # Direct dependencies from current dev.rs
 anyhow = { workspace = true }
@@ -49,6 +59,7 @@ thiserror = { workspace = true }
 # Console output
 indicatif = "0.18"
 console = "0.16"
+similar = "2"

 # Code generation
 graphql-tools = { workspace = true }
@@ -78,4 +89,3 @@ pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-arg

 [dev-dependencies]
 tempfile = "3"
 walkdir = "2"
-similar = "2"
diff --git a/gnd/README.md b/gnd/README.md
index 72355c033f8..61b4f15f117 100644
--- a/gnd/README.md
+++ b/gnd/README.md
@@ -301,25 +301,47 @@ Keys are stored in `~/.graph-cli.json`.

 ### `gnd test`

-Run Matchstick tests for the subgraph.
+Run subgraph tests.

 ```bash
-gnd test [DATASOURCE]
+gnd test [TEST_FILES...]
 ```

 **Arguments:**

-- `DATASOURCE`: Specific data source to test (optional)
+- `TEST_FILES`: Test JSON files or directories to scan. Defaults to `tests/` when nothing is specified.
**Flags:** | Flag | Short | Description | |------|-------|-------------| -| `--coverage` | `-c` | Run with coverage reporting | -| `--docker` | `-d` | Run in Docker container | -| `--force` | `-f` | Force redownload of Matchstick binary | -| `--logs` | `-l` | Show debug logs | -| `--recompile` | `-r` | Force recompilation before testing | -| `--version` | `-v` | Matchstick version to use | +| `--manifest` | `-m` | Path to subgraph manifest (default: `subgraph.yaml`) | +| `--skip-build` | | Skip building the subgraph before testing | +| `--postgres-url` | | PostgreSQL connection URL (env: `POSTGRES_URL`) | +| `--matchstick` | | Use legacy Matchstick runner | +| `--docker` | `-d` | Run Matchstick in Docker (requires `--matchstick`) | +| `--coverage` | `-c` | Run with coverage reporting (requires `--matchstick`) | +| `--recompile` | `-r` | Force recompilation (requires `--matchstick`) | +| `--force` | `-f` | Force redownload of Matchstick binary (requires `--matchstick`) | + +**Examples:** + +```bash +# Run all tests in tests/ directory (default) +gnd test + +# Run specific test files +gnd test transfer.json approval.json +gnd test tests/transfer.json + +# Scan a custom directory +gnd test my-tests/ + +# Use a different manifest +gnd test -m subgraph.staging.yaml tests/transfer.json + +# Skip automatic build +gnd test --skip-build +``` ### `gnd clean` diff --git a/gnd/docs/gnd-test.md b/gnd/docs/gnd-test.md new file mode 100644 index 00000000000..15de2947d96 --- /dev/null +++ b/gnd/docs/gnd-test.md @@ -0,0 +1,752 @@ +# gnd test + +Mock-based subgraph test runner that feeds JSON-defined blocks through real graph-node infrastructure (store, WASM runtime, trigger processing) with only the blockchain layer mocked. + +## Quick Start + +```bash +# Run all tests in tests/ directory +gnd test + +# Run a specific test file +gnd test tests/transfer.json + +# Skip automatic build (if subgraph already built) +gnd test --skip-build + +# Use legacy Matchstick runner +gnd test --matchstick +``` + +## Test File Format + +Tests are JSON files that define: +- Mock blockchain blocks with events +- Mock `eth_call` responses +- GraphQL assertions to validate entity state + +Place test files in a `tests/` directory with `.json` or `.test.json` extension. 
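Structurally, every test file boils down to three top-level pieces; the sections below detail the fields inside each one. A minimal skeleton:

```json
{
  "name": "Human-readable test name",
  "blocks": [],      // mock blocks, processed in order; eth_call mocks live inside each block
  "assertions": []   // GraphQL queries + expected results, run after indexing
}
```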
+
### Basic Example

```json
{
  "name": "Transfer creates entity",
  "blocks": [
    {
      "number": 1,
      "timestamp": 1672531200,
      "events": [
        {
          "address": "0x1234...",
          "event": "Transfer(address indexed from, address indexed to, uint256 value)",
          "params": {
            "from": "0xaaaa...",
            "to": "0xbbbb...",
            "value": "1000"
          }
        }
      ],
      "ethCalls": [
        {
          "address": "0x1234...",
          "function": "balanceOf(address)(uint256)",
          "params": ["0xaaaa..."],
          "returns": ["1000000000000000000"]
        }
      ]
    }
  ],
  "assertions": [
    {
      "query": "{ transfer(id: \"1\") { from to value } }",
      "expected": {
        "transfer": {
          "from": "0xaaaa...",
          "to": "0xbbbb...",
          "value": "1000"
        }
      }
    }
  ]
}
```

## Block Fields

| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `number` | No | Auto-increments from the lowest `startBlock` defined in the manifest, or from `0` if none is defined | Block number |
| `hash` | No | `keccak256(block_number)` | Block hash |
| `timestamp` | No | `block_number` | Unix timestamp |
| `baseFeePerGas` | No | None (pre-EIP-1559) | Base fee in wei |
| `events` | No | Empty array | Log events in this block |
| `ethCalls` | No | Empty array | Mock `eth_call` responses |

### Empty Blocks

Empty blocks (no events) still trigger block handlers:

```json
{
  "name": "Test block handlers",
  "blocks": [
    {
      "number": 1,
      "events": [...]
    },
    {} // Block 2 with no events - block handlers still fire
  ]
}
```

## Event Fields

| Field | Required | Default | Description |
|-------|----------|---------|-------------|
| `address` | Yes | — | Contract address (lowercase hex with 0x prefix) |
| `event` | Yes | — | Full event signature with `indexed` keywords |
| `params` | No | Empty object | Event parameter values |
| `txHash` | No | `keccak256(block_number \|\| log_index)` | Transaction hash |

### Event Signature Format

**Important:** Include `indexed` keywords in the signature:

```json
{
  "event": "Transfer(address indexed from, address indexed to, uint256 value)"
}
```

Not:
```json
{
  "event": "Transfer(address,address,uint256)" // ❌ Missing indexed keywords
}
```

### Parameter Types

Event parameters are automatically ABI-encoded based on the signature. Supported formats:

```json
{
  "params": {
    "from": "0xaaaa...",   // address
    "to": "0xbbbb...",     // address
    "value": "1000",       // uint256 (string or number)
    "amount": 1000,        // uint256 (number)
    "enabled": true,       // bool
    "data": "0x1234...",   // bytes
    "name": "Token"        // string
  }
}
```

## Block Handlers

Block handlers are **automatically triggered** for every block. You don't need to specify block triggers in the JSON.
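Which handlers exist, and which filters they use, comes from the manifest rather than the test file. For orientation, a block handler section looks like this (handler names here are illustrative):

```yaml
mapping:
  blockHandlers:
    - handler: handleBlock             # no filter: fires on every block
    - handler: handleInitialize
      filter:
        kind: once                     # fires once, at startBlock
    - handler: handleEveryThreeBlocks
      filter:
        kind: polling
        every: 3                       # fires every 3 blocks from startBlock
```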
+ +### How Block Handlers Work + +The test runner auto-injects both `Start` and `End` block triggers for each block, ensuring all block handler filters work correctly: + +- **`once` filter** → Fires once at `startBlock` (via `Start` trigger) +- **No filter** → Fires on every block (via `End` trigger) +- **`polling` filter** → Fires every N blocks based on formula: `(block_number - startBlock) % every == 0` + +### Example: Basic Block Handlers + +```json +{ + "name": "Block handlers test", + "blocks": [ + {}, // Block 0 - both 'once' and regular block handlers fire + {} // Block 1 - only regular block handlers fire + ], + "assertions": [ + { + "query": "{ blocks { number } }", + "expected": { + "blocks": [ + {"number": "0"}, + {"number": "1"} + ] + } + }, + { + "query": "{ blockOnces { msg } }", + "expected": { + "blockOnces": [ + {"msg": "This fires only once at block 0"} + ] + } + } + ] +} +``` + +### Polling Block Handlers + +Polling handlers fire at regular intervals specified by the `every` parameter. The handler fires when: + +``` +(block_number - startBlock) % every == 0 +``` + +**Manifest example:** +```yaml +blockHandlers: + - handler: handleEveryThreeBlocks + filter: + kind: polling + every: 3 +``` + +**Test example (startBlock: 0):** +```json +{ + "name": "Polling handler test", + "blocks": [ + {}, // Block 0 - handler fires (0 % 3 == 0) + {}, // Block 1 - handler doesn't fire + {}, // Block 2 - handler doesn't fire + {}, // Block 3 - handler fires (3 % 3 == 0) + {}, // Block 4 - handler doesn't fire + {}, // Block 5 - handler doesn't fire + {} // Block 6 - handler fires (6 % 3 == 0) + ], + "assertions": [ + { + "query": "{ pollingBlocks(orderBy: number) { number } }", + "expected": { + "pollingBlocks": [ + {"number": "0"}, + {"number": "3"}, + {"number": "6"} + ] + } + } + ] +} +``` + +**With non-zero startBlock:** + +When your data source has `startBlock > 0`, the polling interval is calculated from that starting point. + +**Manifest:** +```yaml +dataSources: + - name: Token + source: + startBlock: 100 + mapping: + blockHandlers: + - handler: handlePolling + filter: + kind: polling + every: 5 +``` + +**Test:** +```json +{ + "name": "Polling from block 100", + "blocks": [ + {"number": 100}, // Fires: (100-100) % 5 == 0 + {"number": 101}, // Doesn't fire + {"number": 102}, // Doesn't fire + {"number": 103}, // Doesn't fire + {"number": 104}, // Doesn't fire + {"number": 105}, // Fires: (105-100) % 5 == 0 + {"number": 106}, // Doesn't fire + {"number": 107}, // Doesn't fire + {"number": 108}, // Doesn't fire + {"number": 109}, // Doesn't fire + {"number": 110} // Fires: (110-100) % 5 == 0 + ], + "assertions": [ + { + "query": "{ pollingBlocks(orderBy: number) { number } }", + "expected": { + "pollingBlocks": [ + {"number": "100"}, + {"number": "105"}, + {"number": "110"} + ] + } + } + ] +} +``` + +**Note:** The test runner automatically handles `startBlock > 0`, so blocks default to numbering from the manifest's `startBlock`. 
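Conceptually, the polling rule reduces to a single predicate. This sketch is for intuition only, not graph-node's actual implementation:

```rust
/// Whether a polling block handler fires at `block_number`, given the
/// data source's `start_block` and the filter's `every` interval.
fn polling_fires(block_number: u64, start_block: u64, every: u64) -> bool {
    block_number >= start_block && (block_number - start_block) % every == 0
}
```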
+ +## eth_call Mocking + +Mock contract calls made from mapping handlers using `contract.call()`: + +```json +{ + "ethCalls": [ + { + "address": "0x1234...", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa..."], + "returns": ["1000000000000000000"] + } + ] +} +``` + +### ethCall Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `address` | Yes | Contract address | +| `function` | Yes | Full signature: `"functionName(inputTypes)(returnTypes)"` | +| `params` | Yes | Array of input parameters (as strings) | +| `returns` | Yes | Array of return values (as strings, ignored if `reverts: true`) | +| `reverts` | No | Default `false`. If `true`, the call is cached as `Retval::Null` | + +### Function Signature Format + +Use full signatures with input and return types: + +```json +{ + "function": "symbol()(string)", // No inputs, returns string + "function": "balanceOf(address)(uint256)", // One input, returns uint256 + "function": "decimals()(uint8)" // No inputs, returns uint8 +} +``` + +### Mocking Reverts + +```json +{ + "address": "0x1234...", + "function": "transfer(address,uint256)(bool)", + "params": ["0xaaaa...", "1000"], + "returns": [], + "reverts": true +} +``` + +### Real-World Example + +From the ERC20 test: + +```json +{ + "ethCalls": [ + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "symbol()(string)", + "params": [], + "returns": ["GRT"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "name()(string)", + "params": [], + "returns": ["TheGraph"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa000000000000000000000000000000000000"], + "returns": ["3000000000000000000"] + } + ] +} +``` + +## Assertions + +GraphQL queries to validate the indexed entity state after processing all blocks. + +### Assertion Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `query` | Yes | GraphQL query string | +| `expected` | Yes | Expected JSON response | + +### Comparison Behavior + +| Aspect | Behavior | +|--------|----------| +| Objects | Key-compared, order-insensitive | +| Arrays | **Order-insensitive** (set comparison) | +| String vs Number | Coerced — `"123"` matches `123` | +| Nulls/Booleans | Strict equality | + +**Important:** Arrays are compared as sets (order doesn't matter). If you need ordered results, use `orderBy` in your GraphQL query: + +```json +{ + "query": "{ transfers(orderBy: timestamp, orderDirection: asc) { id from to value } }", + "expected": { ... } +} +``` + +### Multiple Assertions + +You can have multiple assertions per test. They run sequentially after all blocks are processed: + +```json +{ + "assertions": [ + { + "query": "{ tokens { id name symbol } }", + "expected": { ... } + }, + { + "query": "{ accounts { id balance } }", + "expected": { ... } + } + ] +} +``` + +### Nested Entity Queries + +Test relationships and nested entities: + +```json +{ + "query": "{ accounts { id balances { token { symbol } amount } } }", + "expected": { + "accounts": [ + { + "id": "0xbbbb...", + "balances": [ + { + "token": { "symbol": "GRT" }, + "amount": "5000000000000000000" + } + ] + } + ] + } +} +``` + +## startBlock Handling + +The test runner automatically reads `startBlock` from your subgraph manifest and handles it correctly — **no real blockchain connection needed**. + +### How It Works + +1. 
Extracts the **minimum `startBlock`** across all data sources in your manifest +2. If min > 0, creates a `start_block_override` to bypass graph-node's on-chain block validation +3. Test blocks without explicit `"number"` auto-increment starting from that minimum `startBlock` + +### Default Block Numbering + +The starting block number depends on your manifest: + +| Manifest Configuration | Test Block Numbers | +|----------------------|-------------------| +| `startBlock: 0` (or unset) | 0, 1, 2, ... | +| `startBlock: 100` | 100, 101, 102, ... | +| Multiple data sources: `startBlock: 50` and `startBlock: 200` | 50, 51, 52, ... (uses minimum) | + +### Example: Single Data Source + +**Manifest:** +```yaml +dataSources: + - name: Token + source: + startBlock: 1000 +``` + +**Test:** +```json +{ + "blocks": [ + {}, // Block 1000 (auto-numbered) + {} // Block 1001 (auto-numbered) + ] +} +``` + +### Example: Explicit Block Numbers + +Override auto-numbering by specifying `"number"`: + +```json +{ + "blocks": [ + { + "number": 5000, + "events": [...] + }, + { + "number": 5001, + "events": [...] + } + ] +} +``` + +### Multi-Data Source Testing + +When your subgraph has multiple data sources with different `startBlock` values, you may need to use explicit block numbers. + +**Scenario:** DataSource A at `startBlock: 50` (Transfer events), DataSource B at `startBlock: 200` (Approval events). You want to test only DataSource B. + +**Manifest:** +```yaml +dataSources: + - name: TokenTransfers + source: + startBlock: 50 + mapping: + eventHandlers: + - event: Transfer(...) + handler: handleTransfer + - name: TokenApprovals + source: + startBlock: 200 + mapping: + eventHandlers: + - event: Approval(...) + handler: handleApproval +``` + +**Test:** +```json +{ + "name": "Test Approval handler", + "blocks": [ + { + "number": 200, // Explicit number >= DataSource B's startBlock + "events": [ + { + "address": "0x5678...", + "event": "Approval(address indexed owner, address indexed spender, uint256 value)", + "params": { + "owner": "0xaaaa...", + "spender": "0xbbbb...", + "value": "500" + } + } + ] + }, + { + "number": 201, + "events": [...] + } + ] +} +``` + +**Why explicit numbers are needed:** +- Default numbering starts at the **minimum** `startBlock` across all data sources (50 in this case) +- Blocks 50-199 are below DataSource B's `startBlock: 200`, so its handlers won't fire +- Use explicit `"number": 200` to ensure the block is in DataSource B's active range + +**Note:** DataSource A is still "active" from block 50 onward, but it simply sees no matching Transfer events in blocks 200-201, so no handlers fire for it. This is normal behavior — graph-node doesn't error on inactive handlers. 
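If you want the test to demonstrate that explicitly, add an assertion that the untouched side stayed empty (assuming the Transfer handler stores `Transfer` entities queried as `transfers`):

```json
{
  "query": "{ transfers { id } }",
  "expected": { "transfers": [] }
}
```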
+ +## Test Organization + +### Directory Structure + +``` +my-subgraph/ +├── subgraph.yaml +├── schema.graphql +├── src/ +│ └── mapping.ts +└── tests/ + ├── transfer.json + ├── approval.json + └── edge-cases.test.json +``` + +### Naming Conventions + +- Use `.json` or `.test.json` extension +- Descriptive names: `transfer.json`, `mint-burn.json`, `edge-cases.json` +- The test runner discovers all `*.json` and `*.test.json` files in the test directory + +## Known Limitations + +| Feature | Status | +|---------|--------| +| Log events | ✅ Supported | +| Block handlers (all filters) | ✅ Supported | +| eth_call mocking | ✅ Supported | +| Dynamic/template data sources | ✅ Supported | +| Transaction receipts (`receipt: true`) | ❌ Not implemented — handlers get `null` | +| File data sources / IPFS mocking | ❌ Not implemented | +| Call triggers (traces) | ❌ Not implemented | +| `--json` CI output | ❌ Not implemented | +| Parallel test execution | ❌ Not implemented | +| Test name filtering (`--filter`) | ❌ Not implemented | + +## Tips & Best Practices + +### Use Lowercase Addresses + +Always use lowercase hex addresses with `0x` prefix: + +```json +{ + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1" // ✅ Correct +} +``` + +Not: +```json +{ + "address": "0x731A10897D267E19B34503Ad902d0A29173Ba4B1" // ❌ Mixed case +} +``` + +### Test One Thing at a Time + +Write focused tests that validate a single behavior: + +```json +// ✅ Good - tests one scenario +{ + "name": "Transfer event creates TransferEvent entity", + "blocks": [...], + "assertions": [...] +} +``` + +```json +// ❌ Avoid - tests too many things +{ + "name": "Test everything", + "blocks": [/* 50 blocks */], + "assertions": [/* 20 assertions */] +} +``` + +### Order GraphQL Results + +If your assertion needs specific ordering, use `orderBy`: + +```json +{ + "query": "{ transfers(first: 10, orderBy: timestamp, orderDirection: asc) { id } }", + "expected": { ... } +} +``` + +### Test Block Handlers with Empty Blocks + +Use empty blocks to test that block handlers fire even without events: + +```json +{ + "blocks": [ + {}, // Empty block - block handlers still fire + {} + ] +} +``` + +### Split Complex Tests + +Instead of one large test with many blocks, split into multiple focused test files: + +``` +tests/ +├── transfer-basic.json # Basic transfer functionality +├── transfer-zero-value.json # Edge case: zero value +└── transfer-same-account.json # Edge case: self-transfer +``` + +## Architecture + +The test runner reuses real graph-node infrastructure: + +``` +test.json + ↓ +Parse & ABI encode events + ↓ +Mock block stream (StaticStreamBuilder) + ↓ +Real graph-node indexer + ├── WASM runtime + ├── Trigger processing + └── Entity storage (pgtemp database) + ↓ +GraphQL queries → Assertions +``` + +**Key design principles:** + +- **Isolated database per test:** Each test gets a pgtemp database dropped on completion (default), or a shared persistent database with post-test cleanup (`--postgres-url`) +- **Real WASM runtime:** Uses `EthereumRuntimeAdapterBuilder` with real `ethereum.call` host function +- **Pre-populated call cache:** `eth_call` responses are cached before indexing starts +- **No IPFS for manifest:** Uses `FileLinkResolver` to load manifest/WASM from build directory +- **Dummy RPC adapter:** Registered at `http://0.0.0.0:0` for capability lookup; never actually called + +## Troubleshooting + +### Test Fails: "Entity not found" + +**Cause:** Handler didn't create the expected entity. + +**Fix:** +1. 
Check event signature matches ABI (include `indexed` keywords) +2. Verify contract address matches manifest +3. Check block number is >= data source's `startBlock` +4. Add debug logging to your mapping handler + +### Test Timeout + +**Cause:** Indexer took longer than 60 seconds (default timeout). + +**Fix:** +1. Reduce number of blocks in test +2. Simplify mapping logic +3. Check for infinite loops in handler code + +### eth_call Returns Wrong Value + +**Cause:** Call cache miss — no matching mock in `ethCalls`. + +**Fix:** +1. Verify `address`, `function`, and `params` exactly match the call from your mapping +2. Check function signature format: `"functionName(inputTypes)(returnTypes)"` +3. Ensure parameters are in correct order + +### Block Handler Not Firing + +**Cause:** Block handlers auto-fire, but might be outside data source's active range. + +**Fix:** +1. Check data source's `startBlock` in manifest +2. Use explicit `"number"` in test blocks to ensure they're >= `startBlock` +3. Verify handler is defined in manifest's `blockHandlers` section + +## Legacy Matchstick Mode + +Fall back to the external Matchstick test runner for backward compatibility: + +```bash +gnd test --matchstick +``` + +This is useful if: +- You have existing Matchstick tests +- You need features not yet supported by the mock-based runner +- You're migrating gradually from Matchstick to the new test format + +## See Also + +- [Subgraph Manifest Documentation](https://thegraph.com/docs/en/developing/creating-a-subgraph/) +- [AssemblyScript Mapping API](https://thegraph.com/docs/en/developing/assemblyscript-api/) +- [GraphQL Schema](https://thegraph.com/docs/en/developing/creating-a-subgraph/#the-graph-ql-schema) diff --git a/gnd/src/commands/test.rs b/gnd/src/commands/test.rs deleted file mode 100644 index 692db1abb0e..00000000000 --- a/gnd/src/commands/test.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Test command for running Matchstick tests. -//! -//! This command runs the Matchstick test runner for subgraph unit tests. -//! Matchstick is a testing framework for subgraphs that allows testing -//! event handlers, entity storage, and contract calls. - -use std::path::PathBuf; -use std::process::Command; - -use anyhow::{anyhow, Context, Result}; -use clap::Parser; - -use crate::output::{step, Step}; - -#[derive(Clone, Debug, Parser)] -#[clap(about = "Run Matchstick tests for the subgraph")] -pub struct TestOpt { - /// Specific data source to test (optional, tests all if not specified) - #[clap()] - pub datasource: Option, - - /// Run tests with coverage reporting - #[clap(short = 'c', long)] - pub coverage: bool, - - /// Run tests in a Docker container - #[clap(short = 'd', long)] - pub docker: bool, - - /// Force redownload of Matchstick binary / rebuild Docker image - #[clap(short = 'f', long)] - pub force: bool, - - /// Show debug logs (OS info, download URLs) - #[clap(short = 'l', long)] - pub logs: bool, - - /// Force recompilation of tests - #[clap(short = 'r', long)] - pub recompile: bool, - - /// Matchstick version to use - #[clap(short = 'v', long)] - pub version: Option, -} - -/// Run the test command. -pub fn run_test(opt: TestOpt) -> Result<()> { - // Check if Matchstick binary exists in node_modules or PATH - let matchstick_path = find_matchstick()?; - - if opt.docker { - run_docker_tests(&opt) - } else { - run_binary_tests(&matchstick_path, &opt) - } -} - -/// Check if a binary with the given name exists in any PATH directory. 
-fn is_in_path(name: &str) -> bool { - let Some(path_var) = std::env::var_os("PATH") else { - return false; - }; - std::env::split_paths(&path_var).any(|dir| dir.join(name).is_file()) -} - -/// Find the Matchstick binary. -fn find_matchstick() -> Result { - // First, check node_modules/.bin/graph-test (graph-cli's matchstick wrapper) - let node_modules_path = PathBuf::from("node_modules/.bin/graph-test"); - if node_modules_path.exists() { - return Ok(node_modules_path); - } - - // Check for matchstick directly in node_modules - let matchstick_path = PathBuf::from("node_modules/.bin/matchstick"); - if matchstick_path.exists() { - return Ok(matchstick_path); - } - - // Check if matchstick is in PATH - if is_in_path("matchstick") { - return Ok(PathBuf::from("matchstick")); - } - - Err(anyhow!( - "Matchstick not found. Please install it with:\n \ - npm install --save-dev matchstick-as\n\n\ - Or use Docker mode:\n \ - gnd test -d" - )) -} - -/// Run tests using the Matchstick binary. -fn run_binary_tests(matchstick_path: &PathBuf, opt: &TestOpt) -> Result<()> { - step(Step::Generate, "Running Matchstick tests"); - - let mut cmd = Command::new(matchstick_path); - - // Add flags - if opt.coverage { - cmd.arg("-c"); - } - if opt.recompile { - cmd.arg("-r"); - } - - // Add datasource filter if specified - if let Some(datasource) = &opt.datasource { - cmd.arg(datasource); - } - - let status = cmd.status().context("Failed to run Matchstick")?; - - if status.success() { - step(Step::Done, "Tests passed"); - Ok(()) - } else { - Err(anyhow!("Tests failed")) - } -} - -/// Run tests using Docker. -fn run_docker_tests(opt: &TestOpt) -> Result<()> { - step(Step::Generate, "Running Matchstick tests in Docker"); - - // Check if Docker is available - Command::new("docker") - .arg("--version") - .output() - .context("Docker not found. 
Please install Docker to use -d/--docker mode.")?; - - // Build test arguments - let mut test_args = String::new(); - if opt.coverage { - test_args.push_str(" -c"); - } - if opt.recompile { - test_args.push_str(" -r"); - } - if let Some(datasource) = &opt.datasource { - test_args.push_str(&format!(" {}", datasource)); - } - - // Get current working directory - let cwd = std::env::current_dir().context("Failed to get current directory")?; - - // Build docker run command - let mut cmd = Command::new("docker"); - cmd.args([ - "run", - "-it", - "--rm", - "--mount", - &format!("type=bind,source={},target=/matchstick", cwd.display()), - ]); - - if !test_args.is_empty() { - cmd.args(["-e", &format!("ARGS={}", test_args.trim())]); - } - - cmd.arg("matchstick"); - - // Check if matchstick image exists - let image_check = Command::new("docker") - .args(["images", "-q", "matchstick"]) - .output() - .context("Failed to check for Docker image")?; - - let image_exists = !image_check.stdout.is_empty(); - - if !image_exists || opt.force { - // Need to build the image first - step(Step::Generate, "Building Matchstick Docker image"); - - // Create Dockerfile if it doesn't exist - let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); - if !dockerfile_path.exists() || opt.force { - create_dockerfile(&dockerfile_path, opt.version.as_deref())?; - } - - let build_status = Command::new("docker") - .args([ - "build", - "-f", - &dockerfile_path.to_string_lossy(), - "-t", - "matchstick", - ".", - ]) - .status() - .context("Failed to build Docker image")?; - - if !build_status.success() { - return Err(anyhow!("Failed to build Matchstick Docker image")); - } - } - - // Run the container - let status = cmd.status().context("Failed to run Docker container")?; - - if status.success() { - step(Step::Done, "Tests passed"); - Ok(()) - } else { - Err(anyhow!("Tests failed")) - } -} - -/// Create the Dockerfile for Matchstick. -fn create_dockerfile(path: &PathBuf, version: Option<&str>) -> Result<()> { - use std::fs; - - // Create directory if needed - if let Some(parent) = path.parent() { - fs::create_dir_all(parent)?; - } - - let version = version.unwrap_or("0.6.0"); - - let dockerfile_content = format!( - r#"FROM node:18-slim - -# Install build dependencies -RUN apt-get update && apt-get install -y \ - build-essential \ - git \ - && rm -rf /var/lib/apt/lists/* - -# Install matchstick -RUN npm install -g matchstick-as@{version} - -WORKDIR /matchstick - -# Entry point runs tests -ENTRYPOINT ["sh", "-c", "npm install && graph test $ARGS"] -"#, - version = version - ); - - fs::write(path, dockerfile_content) - .with_context(|| format!("Failed to write Dockerfile to {}", path.display()))?; - - step(Step::Write, &format!("Created {}", path.display())); - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_find_matchstick_not_found() { - // In test environment, matchstick likely isn't installed - // This should return an error with helpful message - let result = find_matchstick(); - // Either finds it or returns error - both are valid - assert!(result.is_ok() || result.is_err()); - } -} diff --git a/gnd/src/commands/test/assertion.rs b/gnd/src/commands/test/assertion.rs new file mode 100644 index 00000000000..6ecc868f06a --- /dev/null +++ b/gnd/src/commands/test/assertion.rs @@ -0,0 +1,229 @@ +//! GraphQL assertion execution for test validation. 
+ +use super::runner::TestContext; +use super::schema::{Assertion, AssertionFailure, AssertionOutcome, TestResult}; +use anyhow::{anyhow, Result}; +use graph::data::query::{Query, QueryResults, QueryTarget}; +use graph::prelude::{q, r, ApiVersion, GraphQlRunner as GraphQlRunnerTrait}; + +pub(super) async fn run_assertions( + ctx: &TestContext, + assertions: &[Assertion], +) -> Result { + let mut outcomes = Vec::new(); + + for assertion in assertions { + match run_single_assertion(ctx, assertion).await { + Ok(None) => { + outcomes.push(AssertionOutcome::Passed { + query: assertion.query.clone(), + }); + } + Ok(Some(failure)) => { + outcomes.push(AssertionOutcome::Failed(failure)); + } + Err(e) => { + outcomes.push(AssertionOutcome::Failed(AssertionFailure { + query: assertion.query.clone(), + expected: assertion.expected.clone(), + actual: serde_json::json!({ "error": e.to_string() }), + })); + } + } + } + + Ok(TestResult { + handler_error: None, + assertions: outcomes, + }) +} + +/// Execute a single assertion. Returns `None` on pass, `Some(failure)` on mismatch. +async fn run_single_assertion( + ctx: &TestContext, + assertion: &Assertion, +) -> Result> { + let target = QueryTarget::Deployment(ctx.deployment.hash.clone(), ApiVersion::default()); + let query = Query::new( + q::parse_query(&assertion.query) + .map_err(|e| anyhow!("Failed to parse query: {:?}", e))? + .into_static(), + None, + false, + ); + + let query_res: QueryResults = ctx.graphql_runner.clone().run_query(query, target).await; + + let result = query_res + .first() + .ok_or_else(|| anyhow!("No query result"))? + .duplicate() + .to_result() + .map_err(|errors| anyhow!("Query errors: {:?}", errors))?; + + let actual_json = match result { + Some(value) => r_value_to_json(&value), + None => serde_json::Value::Null, + }; + + if json_equal(&actual_json, &assertion.expected) { + Ok(None) + } else { + Ok(Some(AssertionFailure { + query: assertion.query.clone(), + expected: assertion.expected.clone(), + actual: actual_json, + })) + } +} + +/// Convert graph-node's internal `r::Value` (GraphQL result) to `serde_json::Value`. +/// +/// Graph-node uses its own value type for GraphQL results. This converts to +/// standard JSON for comparison with the expected values in the test file. +fn r_value_to_json(value: &r::Value) -> serde_json::Value { + match value { + r::Value::Null => serde_json::Value::Null, + r::Value::Boolean(b) => serde_json::Value::Bool(*b), + r::Value::Int(n) => serde_json::Value::Number((*n).into()), + r::Value::Float(f) => serde_json::json!(*f), + r::Value::String(s) => serde_json::Value::String(s.clone()), + r::Value::Enum(s) => serde_json::Value::String(s.clone()), + r::Value::List(list) => { + serde_json::Value::Array(list.iter().map(r_value_to_json).collect()) + } + r::Value::Object(obj) => { + let map: serde_json::Map = obj + .iter() + .map(|(k, v)| (k.to_string(), r_value_to_json(v))) + .collect(); + serde_json::Value::Object(map) + } + r::Value::Timestamp(t) => serde_json::Value::String(t.to_string()), + } +} + +/// Reorder `actual` arrays to align with `expected`'s element ordering. +/// +/// When a test fails, the raw diff can be misleading if array elements appear +/// in a different order — every line shows as changed even if only one field +/// differs. This function reorders `actual` so that elements are paired with +/// their closest match in `expected`, producing a diff that highlights only +/// real value differences. 
+pub(super) fn align_for_diff( + expected: &serde_json::Value, + actual: &serde_json::Value, +) -> serde_json::Value { + match (expected, actual) { + (serde_json::Value::Array(exp), serde_json::Value::Array(act)) => { + let mut used = vec![false; act.len()]; + let mut aligned = Vec::with_capacity(exp.len().max(act.len())); + + for exp_elem in exp { + let best = act + .iter() + .enumerate() + .filter(|(i, _)| !used[*i]) + .max_by_key(|(_, a)| json_similarity(exp_elem, a)); + + if let Some((idx, _)) = best { + used[idx] = true; + aligned.push(align_for_diff(exp_elem, &act[idx])); + } + } + + for (i, elem) in act.iter().enumerate() { + if !used[i] { + aligned.push(elem.clone()); + } + } + + serde_json::Value::Array(aligned) + } + (serde_json::Value::Object(exp), serde_json::Value::Object(act)) => { + let aligned: serde_json::Map = act + .iter() + .map(|(k, v)| { + let aligned_v = if let Some(exp_v) = exp.get(k) { + align_for_diff(exp_v, v) + } else { + v.clone() + }; + (k.clone(), aligned_v) + }) + .collect(); + serde_json::Value::Object(aligned) + } + _ => actual.clone(), + } +} + +/// Score how similar two JSON values are for use in [`align_for_diff`]. +/// +/// For objects, counts the number of fields whose values are equal in both. +/// A matching `"id"` field is weighted heavily (+100) since it is the +/// strongest signal that two objects represent the same entity. +/// For all other value types, returns 1 if equal, 0 otherwise. +fn json_similarity(a: &serde_json::Value, b: &serde_json::Value) -> usize { + match (a, b) { + (serde_json::Value::Object(a_obj), serde_json::Value::Object(b_obj)) => { + let mut score = 0; + for (k, v) in a_obj { + if let Some(bv) = b_obj.get(k) { + if json_equal(v, bv) { + // `id` match is a strong signal for entity identity. + score += if k == "id" { 100 } else { 1 }; + } + } + } + score + } + _ => { + if json_equal(a, b) { + 1 + } else { + 0 + } + } + } +} + +/// Compare two JSON values for equality (ignoring key ordering in objects). +/// +/// Also handles string-vs-number coercion: GraphQL returns `BigInt` and +/// `BigDecimal` fields as JSON strings (e.g., `"1000000000000000000"`), +/// but test authors may write them as JSON numbers. This function treats +/// `String("123")` and `Number(123)` as equal when they represent the +/// same value. +fn json_equal(a: &serde_json::Value, b: &serde_json::Value) -> bool { + match (a, b) { + (serde_json::Value::Null, serde_json::Value::Null) => true, + (serde_json::Value::Bool(a), serde_json::Value::Bool(b)) => a == b, + (serde_json::Value::Number(a), serde_json::Value::Number(b)) => a == b, + (serde_json::Value::String(a), serde_json::Value::String(b)) => a == b, + (serde_json::Value::String(s), serde_json::Value::Number(n)) + | (serde_json::Value::Number(n), serde_json::Value::String(s)) => s == &n.to_string(), + (serde_json::Value::Array(a), serde_json::Value::Array(b)) => { + if a.len() != b.len() { + return false; + } + // Order-insensitive: O(n²), fine for realistic test sizes. 
+ let mut used = vec![false; b.len()]; + a.iter().all(|a_elem| { + for (i, b_elem) in b.iter().enumerate() { + if !used[i] && json_equal(a_elem, b_elem) { + used[i] = true; + return true; + } + } + false + }) + } + (serde_json::Value::Object(a), serde_json::Value::Object(b)) => { + a.len() == b.len() + && a.iter() + .all(|(k, v)| b.get(k).map(|bv| json_equal(v, bv)).unwrap_or(false)) + } + _ => false, + } +} diff --git a/gnd/src/commands/test/block_stream.rs b/gnd/src/commands/test/block_stream.rs new file mode 100644 index 00000000000..e07eb9aa104 --- /dev/null +++ b/gnd/src/commands/test/block_stream.rs @@ -0,0 +1,111 @@ +//! Mock `BlockStreamBuilder` that feeds pre-defined test blocks. + +use async_trait::async_trait; +use graph::blockchain::block_stream::{ + BlockStream, BlockStreamBuilder, BlockStreamError, BlockStreamEvent, BlockWithTriggers, + FirehoseCursor, +}; +use graph::blockchain::{BlockPtr, Blockchain, TriggerFilterWrapper}; +use graph::components::store::{DeploymentLocator, SourceableStore}; +use graph::futures03::Stream; +use graph::prelude::BlockNumber; +use graph_chain_ethereum::Chain; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context as TaskContext, Poll}; + +pub(super) struct StaticStreamBuilder { + pub chain: Vec>, +} + +#[async_trait] +impl BlockStreamBuilder for StaticStreamBuilder { + async fn build_firehose( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _block_cursor: FirehoseCursor, + _start_blocks: Vec, + current_block: Option, + _filter: Arc<::TriggerFilter>, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let current_idx = current_block.map(|current_block| { + self.chain + .iter() + .enumerate() + .find(|(_, b)| b.ptr() == current_block) + .map(|(i, _)| i) + .unwrap_or(0) + }); + Ok(Box::new(StaticStream::new(self.chain.clone(), current_idx))) + } + + async fn build_polling( + &self, + _chain: &Chain, + _deployment: DeploymentLocator, + _start_blocks: Vec, + _source_subgraph_stores: Vec>, + current_block: Option, + _filter: Arc>, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result>> { + let current_idx = current_block.map(|current_block| { + self.chain + .iter() + .enumerate() + .find(|(_, b)| b.ptr() == current_block) + .map(|(i, _)| i) + .unwrap_or(0) + }); + Ok(Box::new(StaticStream::new(self.chain.clone(), current_idx))) + } +} + +/// A `Stream` that synchronously yields pre-defined blocks one at a time. +/// +/// Each `poll_next` call returns the next block immediately (no async waiting). +/// When all blocks have been emitted, returns `None` to signal stream completion, +/// which tells the indexer that sync is done. +struct StaticStream { + blocks: Vec>, + current_idx: usize, +} + +impl StaticStream { + /// Create a new stream, optionally skipping past already-processed blocks. + /// + /// `skip_to`: If `Some(i)`, start from block `i+1` (block `i` was already processed). + /// If `None`, start from the beginning. 
+ fn new(blocks: Vec>, skip_to: Option) -> Self { + Self { + blocks, + current_idx: skip_to.map(|i| i + 1).unwrap_or(0), + } + } +} + +impl BlockStream for StaticStream { + fn buffer_size_hint(&self) -> usize { + 1 + } +} + +impl Unpin for StaticStream {} + +impl Stream for StaticStream { + type Item = Result, BlockStreamError>; + + fn poll_next(mut self: Pin<&mut Self>, _cx: &mut TaskContext<'_>) -> Poll> { + if self.current_idx >= self.blocks.len() { + return Poll::Ready(None); + } + + let block = self.blocks[self.current_idx].clone(); + let cursor = FirehoseCursor::from(format!("test-cursor-{}", self.current_idx)); + self.current_idx += 1; + + Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor)))) + } +} diff --git a/gnd/src/commands/test/eth_calls.rs b/gnd/src/commands/test/eth_calls.rs new file mode 100644 index 00000000000..60883d70193 --- /dev/null +++ b/gnd/src/commands/test/eth_calls.rs @@ -0,0 +1,235 @@ +//! Pre-populates the eth_call cache with mock responses for `gnd test`. +//! +//! Function signatures use graph-node's convention: `name(inputs):(outputs)` +//! e.g. `"balanceOf(address):(uint256)"`, `"getReserves():(uint112,uint112,uint32)"`. +//! Call data is encoded using the same path as production graph-node, so cache +//! IDs match exactly what the runtime generates. + +use super::schema::{MockEthCall, TestFile}; +use super::trigger::json_to_sol_value; +use anyhow::{anyhow, Context, Result}; +use graph::abi::FunctionExt as GraphFunctionExt; +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::blockchain::BlockPtr; +use graph::components::store::EthereumCallCache; +use graph::data::store::ethereum::call; +use graph::prelude::alloy::dyn_abi::{DynSolType, FunctionExt as AlloyFunctionExt}; +use graph::prelude::alloy::json_abi::Function; +use graph::prelude::alloy::primitives::Address; +use graph::slog::Logger; +use graph_chain_ethereum::Chain; +use graph_store_postgres::ChainStore; +use std::sync::Arc; + +/// ABI-encode a function call (selector + params) using graph-node's encoding path. +fn encode_function_call(function_sig: &str, params: &[serde_json::Value]) -> Result> { + let alloy_sig = to_alloy_signature(function_sig); + let function = Function::parse(&alloy_sig).map_err(|e| { + anyhow!( + "Failed to parse function signature '{}': {:?}", + function_sig, + e + ) + })?; + + if params.len() != function.inputs.len() { + return Err(anyhow!( + "Parameter count mismatch for '{}': expected {} parameters, got {}", + function_sig, + function.inputs.len(), + params.len() + )); + } + + let args: Vec<_> = params + .iter() + .zip(&function.inputs) + .map(|(json, param)| { + let sol_type: DynSolType = param + .ty + .parse() + .map_err(|e| anyhow!("Invalid type '{}': {:?}", param.ty, e))?; + json_to_sol_value(&sol_type, json) + }) + .collect::>>()?; + + GraphFunctionExt::abi_encode_input(&function, &args).context("Failed to encode function call") +} + +/// ABI-encode function return values (no selector prefix). 
+fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Result> { + let alloy_sig = to_alloy_signature(function_sig); + let function = Function::parse(&alloy_sig).map_err(|e| { + anyhow!( + "Failed to parse function signature '{}': {:?}", + function_sig, + e + ) + })?; + + if returns.len() != function.outputs.len() { + return Err(anyhow!( + "Return value count mismatch for '{}': expected {} return values, got {}", + function_sig, + function.outputs.len(), + returns.len() + )); + } + + let output_values: Vec<_> = returns + .iter() + .zip(&function.outputs) + .map(|(json, param)| { + let sol_type: DynSolType = param + .ty + .parse() + .map_err(|e| anyhow!("Invalid type '{}': {:?}", param.ty, e))?; + json_to_sol_value(&sol_type, json) + }) + .collect::>>()?; + + AlloyFunctionExt::abi_encode_output(&function, &output_values) + .map_err(|e| anyhow!("Failed to encode return value: {}", e)) +} + +/// Convert a graph-node style function signature to alloy's expected format. +/// +/// Graph-node uses `name(inputs):(outputs)` while alloy expects +/// `name(inputs) returns (outputs)`. +/// +/// Examples: +/// - `"balanceOf(address):(uint256)"` → `"balanceOf(address) returns (uint256)"` +/// - `"name():(string)"` → `"name() returns (string)"` +/// - `"transfer(address,uint256)"` → `"transfer(address,uint256)"` (no change) +/// - `"balanceOf(address) returns (uint256)"` → unchanged (already alloy format) +fn to_alloy_signature(sig: &str) -> String { + // If it already contains "returns", assume alloy format. + if sig.contains(" returns ") { + return sig.to_string(); + } + + // Look for the "):(" pattern that separates inputs from outputs. + if let Some(pos) = sig.find("):(") { + let inputs = &sig[..=pos]; // "name(inputs)" + let outputs = &sig[pos + 2..]; // "(outputs)" + format!("{} returns {}", inputs, outputs) + } else { + sig.to_string() + } +} + +/// Populate the eth_call cache from test block mock calls before indexing starts. 
+pub async fn populate_eth_call_cache( + logger: &Logger, + chain_store: Arc, + blocks: &[BlockWithTriggers], + test_file: &TestFile, +) -> Result<()> { + for (block_data, test_block) in blocks.iter().zip(&test_file.blocks) { + let block_ptr = block_data.ptr(); + + for eth_call in &test_block.eth_calls { + populate_single_call(logger, chain_store.clone(), &block_ptr, eth_call).await?; + } + } + Ok(()) +} + +async fn populate_single_call( + logger: &Logger, + chain_store: Arc, + block_ptr: &BlockPtr, + eth_call: &MockEthCall, +) -> Result<()> { + let address: Address = eth_call.address.parse()?; + + let encoded_call = encode_function_call(ð_call.function, ð_call.params)?; + + let request = call::Request::new(address, encoded_call, 0); + + let retval = if eth_call.reverts { + call::Retval::Null + } else { + let encoded_return = encode_return_value(ð_call.function, ð_call.returns)?; + call::Retval::Value(encoded_return.into()) + }; + + chain_store + .set_call(logger, request, block_ptr.clone(), retval) + .await?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_to_alloy_signature_with_colon() { + assert_eq!( + to_alloy_signature("balanceOf(address):(uint256)"), + "balanceOf(address) returns (uint256)" + ); + } + + #[test] + fn test_to_alloy_signature_multiple_outputs() { + assert_eq!( + to_alloy_signature("getReserves():(uint112,uint112,uint32)"), + "getReserves() returns (uint112,uint112,uint32)" + ); + } + + #[test] + fn test_to_alloy_signature_no_outputs() { + assert_eq!( + to_alloy_signature("transfer(address,uint256)"), + "transfer(address,uint256)" + ); + } + + #[test] + fn test_to_alloy_signature_already_alloy_format() { + assert_eq!( + to_alloy_signature("balanceOf(address) returns (uint256)"), + "balanceOf(address) returns (uint256)" + ); + } + + #[test] + fn test_encode_function_call_balanceof() { + let encoded = encode_function_call( + "balanceOf(address):(uint256)", + &[serde_json::json!( + "0x0000000000000000000000000000000000000001" + )], + ) + .unwrap(); + + // First 4 bytes should be the selector for balanceOf(address) + assert_eq!(&encoded[..4], &[0x70, 0xa0, 0x82, 0x31]); + // Total length: 4 (selector) + 32 (address param) = 36 + assert_eq!(encoded.len(), 36); + } + + #[test] + fn test_encode_return_value_uint256() { + let encoded = encode_return_value( + "balanceOf(address):(uint256)", + &[serde_json::json!("1000000000000000000")], + ) + .unwrap(); + + // ABI-encoded uint256 is 32 bytes (no selector) + assert_eq!(encoded.len(), 32); + } + + #[test] + fn test_encode_function_call_no_params() { + let encoded = encode_function_call("symbol():(string)", &[]).unwrap(); + + // Just the 4-byte selector + assert_eq!(encoded.len(), 4); + } +} diff --git a/gnd/src/commands/test/matchstick.rs b/gnd/src/commands/test/matchstick.rs new file mode 100644 index 00000000000..ee2409a0ef8 --- /dev/null +++ b/gnd/src/commands/test/matchstick.rs @@ -0,0 +1,398 @@ +//! Backward-compatible Matchstick test runner (legacy mode). +//! +//! Dispatches to Docker mode or binary mode depending on the `--docker` flag. +//! This is the legacy path for projects that haven't migrated to the new +//! JSON-based test format yet. 
+ +use anyhow::{anyhow, Context, Result}; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::output::{step, Step}; + +use super::TestOpt; + +const MATCHSTICK_GITHUB_RELEASES: &str = + "https://api.github.com/repos/LimeChain/matchstick/releases/latest"; +const MATCHSTICK_DOWNLOAD_BASE: &str = "https://github.com/LimeChain/matchstick/releases/download"; +const MATCHSTICK_FALLBACK_VERSION: &str = "0.6.0"; +const VERSION_CACHE_TTL_SECS: u64 = 86400; // 24 hours + +/// Cached version info written to `{test_dir}/.latest.json`. +#[derive(Serialize, Deserialize)] +struct VersionCache { + version: String, + timestamp: u64, +} + +pub(super) async fn run(opt: &TestOpt) -> Result<()> { + if opt.docker { + run_docker_tests(opt).await + } else { + run_binary_tests(opt).await + } +} + +/// Resolve the Matchstick version to use. +/// +/// Priority: CLI flag → cached `.latest.json` (24h TTL) → GitHub API → fallback. +async fn resolve_matchstick_version( + explicit_version: Option<&str>, + cache_dir: &Path, +) -> Result { + if let Some(v) = explicit_version { + return Ok(v.to_string()); + } + + let cache_path = cache_dir.join(".latest.json"); + + if let Some(cached) = read_version_cache(&cache_path) { + return Ok(cached); + } + + step(Step::Load, "Fetching latest Matchstick version"); + match fetch_latest_version().await { + Ok(version) => { + let _ = write_version_cache(&cache_path, &version); + Ok(version) + } + Err(e) => { + step( + Step::Warn, + &format!( + "Failed to fetch latest version ({}), using {}", + e, MATCHSTICK_FALLBACK_VERSION + ), + ); + Ok(MATCHSTICK_FALLBACK_VERSION.to_string()) + } + } +} + +async fn fetch_latest_version() -> Result { + let client = reqwest::Client::builder().user_agent("gnd-cli").build()?; + + let resp: serde_json::Value = client + .get(MATCHSTICK_GITHUB_RELEASES) + .send() + .await + .context("Failed to reach GitHub API")? + .error_for_status() + .context("GitHub API returned an error")? + .json() + .await + .context("Failed to parse GitHub API response")?; + + resp["tag_name"] + .as_str() + .map(|s| s.to_string()) + .ok_or_else(|| anyhow!("GitHub API response missing tag_name")) +} + +fn read_version_cache(path: &Path) -> Option { + let data = std::fs::read_to_string(path).ok()?; + let cache: VersionCache = serde_json::from_str(&data).ok()?; + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); + + if now.saturating_sub(cache.timestamp) < VERSION_CACHE_TTL_SECS { + Some(cache.version) + } else { + None + } +} + +fn write_version_cache(path: &Path, version: &str) -> Result<()> { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .context("System clock before UNIX epoch")? + .as_secs(); + + let cache = VersionCache { + version: version.to_string(), + timestamp: now, + }; + + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(path, serde_json::to_string_pretty(&cache)?)?; + Ok(()) +} + +/// Platform-specific binary name for a Matchstick version. +/// Mirrors getPlatform from graph-tooling: versions > 0.5.4 use simplified names. 
+fn get_platform(version: &str) -> Result { + let ver = semver::Version::parse(version)?; + let cutoff = semver::Version::new(0, 5, 4); + + let os = std::env::consts::OS; + let arch = std::env::consts::ARCH; + + if arch != "x86_64" && !(os == "macos" && arch == "aarch64") { + return Err(anyhow!("Unsupported platform: {} {}", os, arch)); + } + + if ver > cutoff { + match os { + "macos" if arch == "aarch64" => Ok("binary-macos-12-m1".to_string()), + "macos" => Ok("binary-macos-12".to_string()), + "linux" => Ok("binary-linux-22".to_string()), + _ => Err(anyhow!("Unsupported OS: {}", os)), + } + } else { + // Legacy platform detection for versions <= 0.5.4 + match os { + "macos" => { + let darwin_major = get_darwin_major_version(); + if matches!(darwin_major, Some(18) | Some(19)) { + Ok("binary-macos-10.15".to_string()) + } else if arch == "aarch64" { + Ok("binary-macos-11-m1".to_string()) + } else { + Ok("binary-macos-11".to_string()) + } + } + "linux" => { + let linux_major = get_linux_major_version(); + match linux_major { + Some(18) => Ok("binary-linux-18".to_string()), + Some(22) | Some(24) => Ok("binary-linux-22".to_string()), + _ => Ok("binary-linux-20".to_string()), + } + } + _ => Err(anyhow!("Unsupported OS: {}", os)), + } + } +} + +/// Darwin kernel major version from `uname -r`. Darwin 18/19 → macOS 10.14/10.15. +fn get_darwin_major_version() -> Option { + let output = std::process::Command::new("uname") + .arg("-r") + .output() + .ok()?; + let release = String::from_utf8_lossy(&output.stdout); + release.trim().split('.').next()?.parse().ok() +} + +fn get_linux_major_version() -> Option { + let content = std::fs::read_to_string("/etc/os-release").ok()?; + for line in content.lines() { + if let Some(val) = line.strip_prefix("VERSION_ID=") { + let val = val.trim_matches('"'); + // Handle "22.04" → 22, or "22" → 22 + return val.split('.').next()?.parse().ok(); + } + } + None +} + +/// Download the Matchstick binary to `node_modules/.bin/matchstick-{platform}`. Skips if already exists. 
+async fn download_matchstick_binary(version: &str, platform: &str, force: bool) -> Result { + let bin_dir = PathBuf::from("node_modules/.bin"); + let bin_path = bin_dir.join(format!("matchstick-{platform}")); + + if bin_path.exists() && !force { + step( + Step::Done, + &format!("Binary already exists: {}", bin_path.display()), + ); + return Ok(bin_path); + } + + std::fs::create_dir_all(&bin_dir)?; + + let url = format!("{MATCHSTICK_DOWNLOAD_BASE}/{version}/{platform}"); + step(Step::Load, &format!("Downloading Matchstick {version}")); + + let client = reqwest::Client::builder().user_agent("gnd-cli").build()?; + + let resp = client.get(&url).send().await?.error_for_status()?; + + let bytes = resp + .bytes() + .await + .context("Failed to read download response")?; + + std::fs::write(&bin_path, &bytes)?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions(&bin_path, std::fs::Permissions::from_mode(0o755))?; + } + + step(Step::Done, &format!("Downloaded to {}", bin_path.display())); + Ok(bin_path) +} + +async fn run_binary_tests(opt: &TestOpt) -> Result<()> { + step(Step::Generate, "Running Matchstick tests (legacy mode)"); + + let version = resolve_matchstick_version( + opt.matchstick_version.as_deref(), + Path::new(super::DEFAULT_TEST_DIR), + ) + .await?; + + let platform = get_platform(&version)?; + let bin_path = download_matchstick_binary(&version, &platform, opt.force).await?; + + let workdir = opt + .manifest + .parent() + .filter(|p| !p.as_os_str().is_empty()) + .unwrap_or(Path::new(".")); + let mut cmd = std::process::Command::new(&bin_path); + cmd.current_dir(workdir); + + if opt.coverage { + cmd.arg("-c"); + } + if opt.recompile { + cmd.arg("-r"); + } + if let Some(datasource) = &opt.datasource { + cmd.arg(datasource); + } + + let status = cmd.status()?; + + if status.success() { + step(Step::Done, "Matchstick tests passed"); + Ok(()) + } else { + Err(anyhow!("Matchstick tests failed")) + } +} + +/// Run Matchstick tests in Docker (recommended on macOS where the native binary is bugged). +async fn run_docker_tests(opt: &TestOpt) -> Result<()> { + step(Step::Generate, "Running Matchstick tests in Docker"); + + std::process::Command::new("docker") + .arg("--version") + .output() + .context("Docker not found. Please install Docker to use -d/--docker mode.")?; + + let mut test_args = String::new(); + if opt.coverage { + test_args.push_str(" -c"); + } + if opt.recompile { + test_args.push_str(" -r"); + } + if let Some(datasource) = &opt.datasource { + // Validate datasource name to prevent shell injection via Docker's + // `sh -c "matchstick $ARGS"` expansion. + if !datasource + .chars() + .all(|c| c.is_alphanumeric() || c == '_' || c == '-') + { + anyhow::bail!( + "Invalid datasource name '{}': must contain only alphanumeric characters, hyphens, or underscores", + datasource + ); + } + test_args.push_str(&format!(" {}", datasource)); + } + + let cwd = std::env::current_dir().context("Failed to get current directory")?; + + let mut cmd = std::process::Command::new("docker"); + cmd.args([ + "run", + "-it", + "--rm", + "--mount", + &format!("type=bind,source={},target=/matchstick", cwd.display()), + ]); + if !test_args.is_empty() { + cmd.args(["-e", &format!("ARGS={}", test_args.trim())]); + } + cmd.arg("matchstick"); + + // Check if the Docker image already exists. 
+ let image_check = std::process::Command::new("docker") + .args(["images", "-q", "matchstick"]) + .output() + .context("Failed to check for Docker image")?; + let image_exists = !image_check.stdout.is_empty(); + + if !image_exists || opt.force { + let version = resolve_matchstick_version( + opt.matchstick_version.as_deref(), + Path::new(super::DEFAULT_TEST_DIR), + ) + .await?; + + step(Step::Generate, "Building Matchstick Docker image"); + let dockerfile_path = PathBuf::from("tests/.docker/Dockerfile"); + if !dockerfile_path.exists() || opt.force { + create_dockerfile(&dockerfile_path, &version)?; + } + let build_status = std::process::Command::new("docker") + .args([ + "build", + "-f", + &dockerfile_path.to_string_lossy(), + "-t", + "matchstick", + ".", + ]) + .status() + .context("Failed to build Docker image")?; + if !build_status.success() { + return Err(anyhow!("Failed to build Matchstick Docker image")); + } + } + + let status = cmd.status().context("Failed to run Docker container")?; + if status.success() { + step(Step::Done, "Tests passed"); + Ok(()) + } else { + Err(anyhow!("Tests failed")) + } +} + +/// Generate a Dockerfile that downloads the Matchstick runner binary (not from npm). +fn create_dockerfile(path: &PathBuf, version: &str) -> Result<()> { + use std::fs; + + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let dockerfile_content = format!( + r#"FROM --platform=linux/x86_64 ubuntu:22.04 + +ARG DEBIAN_FRONTEND=noninteractive +ENV ARGS="" + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + curl ca-certificates postgresql postgresql-contrib \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \ + && apt-get install -y --no-install-recommends nodejs \ + && rm -rf /var/lib/apt/lists/* + +RUN curl -fsSL -o /usr/local/bin/matchstick \ + https://github.com/LimeChain/matchstick/releases/download/{version}/binary-linux-22 \ + && chmod +x /usr/local/bin/matchstick + +RUN mkdir /matchstick +WORKDIR /matchstick + +CMD ["sh", "-c", "matchstick $ARGS"] +"#, + version = version + ); + + fs::write(path, dockerfile_content)?; + step(Step::Write, &format!("Created {}", path.display())); + Ok(()) +} diff --git a/gnd/src/commands/test/mock_chain.rs b/gnd/src/commands/test/mock_chain.rs new file mode 100644 index 00000000000..8978284eb63 --- /dev/null +++ b/gnd/src/commands/test/mock_chain.rs @@ -0,0 +1,19 @@ +//! Block pointer utilities for mock chains. + +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::prelude::alloy::primitives::B256; +use graph::prelude::BlockPtr; +use graph_chain_ethereum::Chain; + +/// Last block pointer — used as the indexer's stop target. +pub fn final_block_ptr(blocks: &[BlockWithTriggers]) -> Option { + blocks.last().map(|b| b.ptr()) +} + +/// Genesis block (block 0, zero hash) — used as stop target when there are no blocks. +pub fn genesis_ptr() -> BlockPtr { + BlockPtr { + hash: B256::ZERO.into(), + number: 0, + } +} diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs new file mode 100644 index 00000000000..6983ad1865f --- /dev/null +++ b/gnd/src/commands/test/mod.rs @@ -0,0 +1,210 @@ +//! Mock-based subgraph test runner for `gnd test`. +//! +//! This module replaces the old Matchstick-only test command with a mock-based +//! integration testing solution. Tests are defined as JSON files containing mock +//! blockchain data (blocks, log events, block triggers) and GraphQL assertions +//! 
that validate the resulting entity state. +//! +//! ## How it works +//! +//! 1. Build the subgraph (unless `--skip-build`) +//! 2. Discover `*.json` / `*.test.json` files in the test directory +//! 3. For each test file: +//! a. Parse JSON into mock blocks with triggers +//! b. Spin up a temporary PostgreSQL database (pgtemp on Unix) +//! c. Initialize graph-node stores and deploy the subgraph +//! d. Feed mock blocks through a static block stream (no real RPC) +//! e. Wait for the indexer to process all blocks +//! f. Run GraphQL assertions against the indexed data +//! 4. Report pass/fail results +//! +//! The key insight is that we reuse real graph-node infrastructure (stores, +//! subgraph deployment, WASM runtime) and only mock the blockchain layer. +//! This means tests exercise the same code paths as production indexing. +//! +//! ## Legacy mode +//! +//! The `--matchstick` flag falls back to the external Matchstick test runner +//! for backward compatibility with existing test suites. + +mod assertion; +mod block_stream; +mod eth_calls; +mod matchstick; +mod mock_chain; +mod noop; +mod output; +mod runner; +mod schema; +mod trigger; + +use anyhow::{anyhow, Result}; +use clap::Parser; +use console::style; +use std::path::PathBuf; + +use crate::output::{step, Step}; + +const DEFAULT_TEST_DIR: &str = "tests"; + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Run subgraph tests")] +pub struct TestOpt { + /// Test files or directories to run. Directories are scanned for *.json / *.test.json. + /// Defaults to the "tests/" directory when nothing is specified. + pub tests: Vec, + + /// Path to subgraph manifest + #[clap(short = 'm', long, default_value = "subgraph.yaml")] + pub manifest: PathBuf, + + /// Skip building the subgraph before testing + #[clap(long)] + pub skip_build: bool, + + /// PostgreSQL connection URL. If not provided, a temporary database will be created (Unix only). + #[clap(long, env = "POSTGRES_URL")] + pub postgres_url: Option, + + /// Use Matchstick runner instead (legacy mode) + #[clap(long)] + pub matchstick: bool, + + /// Run Matchstick tests in Docker (recommended on macOS where the native binary is bugged) + #[clap(short = 'd', long, requires = "matchstick")] + pub docker: bool, + + /// Run tests with coverage reporting (Matchstick only) + #[clap(short = 'c', long, requires = "matchstick")] + pub coverage: bool, + + /// Force recompilation of tests (Matchstick only) + #[clap(short = 'r', long, requires = "matchstick")] + pub recompile: bool, + + /// Force redownload of Matchstick binary / rebuild Docker image + #[clap(short = 'f', long, requires = "matchstick")] + pub force: bool, + + /// Matchstick version to use (default: latest from GitHub) + #[clap(long, requires = "matchstick")] + pub matchstick_version: Option, + + /// Specific data source to test (Matchstick only) + #[clap(long, requires = "matchstick")] + pub datasource: Option, + + /// Increase graph-node log verbosity (-v info, -vv debug, -vvv trace). + /// Overridden by GRAPH_LOG env var when set. + #[clap(short = 'v', long, action = clap::ArgAction::Count)] + pub verbose: u8, +} + +pub async fn run_test(opt: TestOpt) -> Result<()> { + if opt.matchstick { + return matchstick::run(&opt).await; + } + + // Build the subgraph first so the WASM and schema are available in build/. + // This mirrors what a user would do manually before running tests. 
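+    // In other words, the default flow is assumed to behave like
+    //
+    //   gnd build && gnd test --skip-build
+    //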
+ if !opt.skip_build { + step(Step::Generate, "Building subgraph"); + let build_opt = crate::commands::BuildOpt { + manifest: opt.manifest.clone(), + output_dir: std::path::PathBuf::from("build"), + output_format: "wasm".to_string(), + skip_migrations: false, + watch: false, + ipfs: None, + network: None, + network_file: std::path::PathBuf::from("networks.json"), + skip_asc_version_check: false, + }; + crate::commands::run_build(build_opt).await?; + step(Step::Done, "Build complete"); + } + + // Resolve test files from positional args. Default to "tests/" when none given. + let tests = if opt.tests.is_empty() { + vec![PathBuf::from(DEFAULT_TEST_DIR)] + } else { + opt.tests.clone() + }; + + step(Step::Load, "Loading manifest"); + let manifest_info = runner::load_manifest_info(&opt)?; + + step(Step::Load, "Discovering test files"); + let test_files = resolve_test_paths(&tests)?; + + if test_files.is_empty() { + step(Step::Warn, "No test files found"); + for test in &tests { + println!( + " Looking in: {}", + test.canonicalize().unwrap_or(test.clone()).display() + ); + } + println!(" Expected: *.test.json or *.json files"); + return Ok(()); + } + + let mut passed = 0; + let mut failed = 0; + let mut all_failures = Vec::new(); + + for path in test_files { + output::print_test_start(&path); + + let test_file = match schema::parse_test_file(&path) { + Ok(tf) => tf, + Err(e) => { + println!(" {} Failed to parse: {}", style("✘").red(), e); + failed += 1; + continue; + } + }; + + match runner::run_single_test(&opt, &manifest_info, &test_file).await { + Ok(result) => { + output::print_test_result(&test_file.name, &result); + if result.is_passed() { + passed += 1; + } else { + all_failures.extend(output::collect_failures(&test_file.name, &result)); + failed += 1; + } + } + Err(e) => { + println!(" {} {} - Error: {}", style("✘").red(), test_file.name, e); + failed += 1; + } + } + } + + output::print_failure_details(&all_failures); + output::print_summary(passed, failed); + + if failed > 0 { + Err(anyhow!("{} test(s) failed", failed)) + } else { + Ok(()) + } +} + +fn resolve_test_paths(paths: &[PathBuf]) -> Result> { + let mut files = Vec::new(); + + for path in paths { + if path.is_dir() { + files.extend(schema::discover_test_files(path)?); + } else if path.exists() { + files.push(path.clone()); + } else { + anyhow::bail!("Test file not found: {}", path.display()); + } + } + + files.sort(); + Ok(files) +} diff --git a/gnd/src/commands/test/noop.rs b/gnd/src/commands/test/noop.rs new file mode 100644 index 00000000000..b686177b795 --- /dev/null +++ b/gnd/src/commands/test/noop.rs @@ -0,0 +1,121 @@ +//! Noop/stub trait implementations for the mock `Chain`. +//! +//! These types satisfy the trait bounds required by the `Chain` constructor +//! but are never called during normal test execution because: +//! - Triggers are provided directly via `StaticStreamBuilder` (no scanning needed) +//! - The real `EthereumRuntimeAdapterBuilder` is used for host functions +//! 
(ethereum.call, ethereum.getBalance, ethereum.hasCode), backed by the call cache + +use async_trait::async_trait; +use graph::blockchain::block_stream::{BlockRefetcher, BlockWithTriggers, FirehoseCursor}; +use graph::blockchain::{BlockPtr, Blockchain, TriggersAdapter, TriggersAdapterSelector}; +use graph::components::store::DeploymentLocator; +use graph::prelude::{BlockHash, BlockNumber, Error}; +use graph::slog::{Discard, Logger}; +use std::collections::BTreeSet; +use std::marker::PhantomData; +use std::sync::Arc; + +use graph::slog::o; + +/// No-op block refetcher. Tests have no reorgs, so refetching is never needed. +pub(super) struct StaticBlockRefetcher { + pub _phantom: PhantomData, +} + +#[async_trait] +impl BlockRefetcher for StaticBlockRefetcher { + fn required(&self, _chain: &C) -> bool { + false + } + + async fn get_block( + &self, + _chain: &C, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + Err(anyhow::anyhow!( + "StaticBlockRefetcher::get_block should never be called — block refetching is disabled in test mode" + )) + } +} + +pub(super) struct NoopAdapterSelector { + pub _phantom: PhantomData, +} + +impl TriggersAdapterSelector for NoopAdapterSelector { + fn triggers_adapter( + &self, + _loc: &DeploymentLocator, + _capabilities: &::NodeCapabilities, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> Result>, Error> { + Ok(Arc::new(NoopTriggersAdapter { + _phantom: PhantomData, + })) + } +} + +/// Returns empty/default results. Never called since triggers come from `StaticStreamBuilder`. +struct NoopTriggersAdapter { + _phantom: PhantomData, +} + +#[async_trait] +impl TriggersAdapter for NoopTriggersAdapter { + async fn ancestor_block( + &self, + _ptr: BlockPtr, + _offset: BlockNumber, + _root: Option, + ) -> Result::Block>, Error> { + Ok(None) + } + + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + Ok(vec![]) + } + + async fn chain_head_ptr(&self) -> Result, Error> { + Ok(None) + } + + async fn scan_triggers( + &self, + _from: BlockNumber, + _to: BlockNumber, + _filter: &C::TriggerFilter, + ) -> Result<(Vec>, BlockNumber), Error> { + Ok((vec![], 0)) + } + + async fn triggers_in_block( + &self, + _logger: &Logger, + block: ::Block, + _filter: &::TriggerFilter, + ) -> Result, Error> { + let logger = Logger::root(Discard, o!()); + Ok(BlockWithTriggers::new(block, Vec::new(), &logger)) + } + + async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { + Ok(true) + } + + async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { + match block.number { + 0 => Ok(None), + n => Ok(Some(BlockPtr { + hash: BlockHash::default(), + number: n - 1, + })), + } + } +} diff --git a/gnd/src/commands/test/output.rs b/gnd/src/commands/test/output.rs new file mode 100644 index 00000000000..e02b90f9179 --- /dev/null +++ b/gnd/src/commands/test/output.rs @@ -0,0 +1,113 @@ +//! Console output formatting for test results. + +use console::style; +use similar::{ChangeTag, TextDiff}; + +use super::assertion::align_for_diff; +use super::schema::{AssertionFailure, AssertionOutcome, TestResult}; +use crate::output::{step, Step}; + +pub fn print_test_start(path: &std::path::Path) { + step(Step::Load, &format!("Running {}", path.display())); +} + +/// Print pass/fail for a test case. Diffs are deferred to `print_failure_details`. 
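+///
+/// Illustrative shape of the output (a sketch, not captured from a real run):
+///
+/// ```text
+///   ✔ Transfer creates entity
+///     ✔ { transfer(id: "1") { value } }
+///   ✘ Approval creates entity
+///     ✘ { approval(id: "1") { spender } }
+/// ```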
+pub fn print_test_result(name: &str, result: &TestResult) { + if result.is_passed() { + println!(" {} {}", style("✔").green(), name); + } else { + println!(" {} {}", style("✘").red(), name); + } + + if let Some(err) = &result.handler_error { + println!(" {} {}", style("Handler error:").red(), err); + } + + for outcome in &result.assertions { + match outcome { + AssertionOutcome::Passed { query } => { + println!(" {} {}", style("✔").green(), style(query).dim()); + } + AssertionOutcome::Failed(failure) => { + println!(" {} {}", style("✘").red(), failure.query); + } + } + } +} + +pub struct FailureDetail { + pub test_name: String, + pub failure: AssertionFailure, +} + +pub fn collect_failures(test_name: &str, result: &TestResult) -> Vec { + result + .assertions + .iter() + .filter_map(|outcome| match outcome { + AssertionOutcome::Passed { .. } => None, + AssertionOutcome::Failed(failure) => Some(FailureDetail { + test_name: test_name.to_string(), + failure: AssertionFailure { + query: failure.query.clone(), + expected: failure.expected.clone(), + actual: failure.actual.clone(), + }, + }), + }) + .collect() +} + +pub fn print_failure_details(details: &[FailureDetail]) { + if details.is_empty() { + return; + } + + println!("\n{}", style("Failures:").red().bold()); + + for detail in details { + println!( + "\n {} {} {}", + style("●").red(), + style(&detail.test_name).bold(), + style("→").dim(), + ); + println!(" {} {}", style("Query:").yellow(), detail.failure.query); + + let expected = serde_json::to_string_pretty(&detail.failure.expected).unwrap_or_default(); + let aligned_actual = align_for_diff(&detail.failure.expected, &detail.failure.actual); + let actual = serde_json::to_string_pretty(&aligned_actual).unwrap_or_default(); + + println!( + " {} {} expected {} actual", + style("Diff:").yellow(), + style("(-)").red(), + style("(+)").green(), + ); + + let diff = TextDiff::from_lines(&expected, &actual); + for change in diff.iter_all_changes() { + let text = change.value().trim_end_matches('\n'); + match change.tag() { + ChangeTag::Delete => println!(" {}", style(format!("- {text}")).red()), + ChangeTag::Insert => println!(" {}", style(format!("+ {text}")).green()), + ChangeTag::Equal => println!(" {text}"), + } + } + } +} + +pub fn print_summary(passed: usize, failed: usize) { + println!(); + if failed == 0 { + println!( + "{}", + style(format!("Tests: {} passed, {} failed", passed, failed)).green() + ); + } else { + println!( + "{}", + style(format!("Tests: {} passed, {} failed", passed, failed)).red() + ); + } +} diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs new file mode 100644 index 00000000000..de95946c986 --- /dev/null +++ b/gnd/src/commands/test/runner.rs @@ -0,0 +1,789 @@ +//! Test runner: orchestrates subgraph indexing with mock blockchain data. +//! +//! This is the core of `gnd test`. For each test file, it: +//! +//! 1. Creates a test database (`TestDatabase::Temporary` via pgtemp, or +//! `TestDatabase::Persistent` via `--postgres-url`) for test isolation +//! 2. Initializes graph-node stores (entity storage, block storage, chain store) +//! 3. Constructs a mock Ethereum chain that feeds pre-defined blocks +//! 4. Deploys the subgraph and starts the indexer +//! 5. Waits for all blocks to be processed (or a fatal error) +//! 6. 
Runs GraphQL assertions against the indexed entity state + +use super::assertion::run_assertions; +use super::block_stream::StaticStreamBuilder; +use super::mock_chain; +use super::noop::{NoopAdapterSelector, StaticBlockRefetcher}; +use super::schema::{TestFile, TestResult}; +use super::trigger::build_blocks_with_triggers; +use super::TestOpt; +use crate::manifest::{load_manifest, Manifest}; +use anyhow::{anyhow, ensure, Context, Result}; +use graph::amp::FlightClient; +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier}; +use graph::cheap_clone::CheapClone; +use graph::components::link_resolver::{ArweaveClient, FileLinkResolver}; +use graph::components::metrics::MetricsRegistry; +use graph::components::network_provider::{ + AmpChainNames, ChainName, ProviderCheckStrategy, ProviderManager, +}; +use graph::components::store::DeploymentLocator; +use graph::components::subgraph::{Settings, SubgraphInstanceManager as _}; +use graph::data::graphql::load_manager::LoadManager; +use graph::data::subgraph::schema::SubgraphError; +use graph::endpoint::EndpointMetrics; +use graph::env::EnvVars; +use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; +use graph::ipfs::{IpfsMetrics, IpfsRpcClient, ServerAddress}; +use graph::prelude::{ + DeploymentHash, LoggerFactory, NodeId, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, + SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, +}; +use graph::slog::{info, o, Logger}; +use graph_chain_ethereum::chain::EthereumRuntimeAdapterBuilder; +use graph_chain_ethereum::network::{EthereumNetworkAdapter, EthereumNetworkAdapters}; +use graph_chain_ethereum::{ + Chain, EthereumAdapter, NodeCapabilities, ProviderEthRpcMetrics, Transport, +}; +use graph_core::polling_monitor::{arweave_service, ipfs_service}; +use graph_graphql::prelude::GraphQlRunner; +use graph_node::config::Config; +use graph_node::manager::PanicSubscriptionManager; +use graph_node::store_builder::StoreBuilder; +use graph_store_postgres::{ChainHeadUpdateListener, ChainStore, Store, SubgraphStore}; +use std::marker::PhantomData; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +#[cfg(unix)] +use pgtemp::PgTempDBBuilder; + +const NODE_ID: &str = "gnd-test"; + +/// Build a logger from the `-v` flag. `GRAPH_LOG` env var always takes precedence. +/// `verbose`: 0=off, 1=Info, 2=Debug, 3+=Trace. +fn make_test_logger(verbose: u8) -> Logger { + if std::env::var("GRAPH_LOG").is_ok() { + return graph::log::logger(true); + } + + match verbose { + 0 => graph::log::discard(), + 1 => graph::log::logger_with_levels(false, None), + 2 => graph::log::logger_with_levels(true, None), + // "trace" is parsed by slog_envlogger::LogBuilder::parse() as a global + // level filter — equivalent to setting GRAPH_LOG=trace. + _ => graph::log::logger_with_levels(true, Some("trace")), + } +} + +struct TestStores { + network_name: ChainName, + /// Listens for chain head updates — needed by the Chain constructor. + chain_head_listener: Arc, + network_store: Arc, + chain_store: Arc, +} + +/// Components needed to run a test after infrastructure setup. +pub(super) struct TestContext { + pub(super) provider: Arc, + pub(super) store: Arc, + pub(super) deployment: DeploymentLocator, + pub(super) graphql_runner: Arc>, +} + +/// Pre-computed manifest data shared across all tests in a run. +/// +/// Loaded once to avoid redundant parsing. 
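+///
+/// Built once by `load_manifest_info` and then borrowed by every
+/// `run_single_test` call in the same invocation.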
+pub(super) struct ManifestInfo { + pub build_dir: PathBuf, + /// Canonical path to the built manifest file (e.g., `build/subgraph.yaml`). + /// Registered as an alias for `hash` in `FileLinkResolver` so that + /// `clone_for_manifest` can resolve the Qm hash to a real filesystem path. + pub manifest_path: PathBuf, + pub network_name: ChainName, + pub min_start_block: u64, + /// Override for on-chain block validation when startBlock > 0. + pub start_block_override: Option, + pub hash: DeploymentHash, + pub subgraph_name: SubgraphName, +} + +/// Compute a `DeploymentHash` from a path and seed. +/// +/// Produces `"Qm" + hex(sha1(path + '\0' + seed))`. The seed makes each run +/// produce a distinct hash so sequential runs never collide in the store. +fn deployment_hash_from_path_and_seed(path: &Path, seed: u128) -> Result { + use sha1::{Digest, Sha1}; + + let input = format!("{}\0{}", path.display(), seed); + let digest = Sha1::digest(input.as_bytes()); + let qm = format!("Qm{:x}", digest); + DeploymentHash::new(qm).map_err(|e| anyhow!("Failed to create deployment hash: {}", e)) +} + +pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { + let manifest_dir = opt + .manifest + .parent() + .map(|p| p.to_path_buf()) + .unwrap_or_else(|| PathBuf::from(".")); + + let build_dir = manifest_dir.join("build"); + + let manifest_filename = opt + .manifest + .file_name() + .and_then(|s| s.to_str()) + .unwrap_or("subgraph.yaml"); + let built_manifest_path = build_dir.join(manifest_filename); + let built_manifest_path = built_manifest_path + .canonicalize() + .context("Failed to resolve built manifest path — did you run 'gnd build'?")?; + + let manifest = load_manifest(&built_manifest_path)?; + + let network_name: ChainName = extract_network_from_manifest(&manifest)?.into(); + let min_start_block = extract_start_block_from_manifest(&manifest)?; + + let start_block_override = if min_start_block > 0 { + use graph::prelude::alloy::primitives::keccak256; + let hash = keccak256((min_start_block - 1).to_be_bytes()); + ensure!( + min_start_block - 1 <= i32::MAX as u64, + "block number {} exceeds i32::MAX", + min_start_block - 1 + ); + Some(BlockPtr::new(hash.into(), (min_start_block - 1) as i32)) + } else { + None + }; + + // Use Unix epoch millis as a per-run seed so each invocation gets a unique + // deployment hash and subgraph name, avoiding conflicts with previous runs. + let seed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_millis(); + + let hash = deployment_hash_from_path_and_seed(&built_manifest_path, seed)?; + + // Derive subgraph name from the root directory (e.g., "my-subgraph" → "test/my-subgraph-"). + // Sanitize to alphanumeric + hyphens + underscores for SubgraphName compatibility. 
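+    // For example (hypothetical values): a project rooted at ".../my-subgraph"
+    // with seed 1700000000000 yields the name "test/my-subgraph-1700000000000".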
+ let root_dir_name = manifest_dir + .canonicalize() + .unwrap_or(manifest_dir.clone()) + .file_name() + .and_then(|s| s.to_str()) + .unwrap_or("gnd-test") + .chars() + .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '_') + .collect::(); + let subgraph_name = SubgraphName::new(format!("test/{}-{}", root_dir_name, seed)) + .map_err(|e| anyhow!("{}", e))?; + + Ok(ManifestInfo { + build_dir, + manifest_path: built_manifest_path, + network_name, + min_start_block, + start_block_override, + hash, + subgraph_name, + }) +} + +fn extract_network_from_manifest(manifest: &Manifest) -> Result { + let network = manifest + .data_sources + .first() + .and_then(|ds| ds.network.clone()) + .unwrap_or_else(|| "mainnet".to_string()); + + Ok(network) +} + +/// Extract the minimum `startBlock` across all data sources. +/// +/// Used to build `start_block_override` for bypassing on-chain validation. +fn extract_start_block_from_manifest(manifest: &Manifest) -> Result { + Ok(manifest + .data_sources + .iter() + .map(|ds| ds.start_block) + .min() + .unwrap_or(0)) +} + +pub async fn run_single_test( + opt: &TestOpt, + manifest_info: &ManifestInfo, + test_file: &TestFile, +) -> Result { + // Warn (and short-circuit) when there are no assertions. + if test_file.assertions.is_empty() { + if test_file.blocks.is_empty() { + eprintln!( + " {} Test '{}' has no blocks and no assertions", + console::style("⚠").yellow(), + test_file.name + ); + return Ok(TestResult { + handler_error: None, + assertions: vec![], + }); + } else { + eprintln!( + " {} Test '{}' has blocks but no assertions", + console::style("⚠").yellow(), + test_file.name + ); + } + } + + // Default block numbering starts at the manifest's startBlock so that + // test blocks without explicit numbers fall in the subgraph's indexed range. + let blocks = build_blocks_with_triggers(test_file, manifest_info.min_start_block)?; + + // Create the database for this test. For pgtemp, the `db` value must + // stay alive for the duration of the test — dropping it destroys the database. + let db = create_test_database(opt, &manifest_info.build_dir)?; + + let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); + + let stores = setup_stores(&logger, &db, &manifest_info.network_name).await?; + + let chain = setup_chain(&logger, blocks.clone(), &stores).await?; + + let ctx = setup_context(&logger, &stores, &chain, manifest_info).await?; + + // Populate eth_call cache with mock responses before starting indexer. + // This ensures handlers can successfully retrieve mocked contract call results. + super::eth_calls::populate_eth_call_cache( + &logger, + stores.chain_store.cheap_clone(), + &blocks, + test_file, + ) + .await?; + + // Determine the target block — the indexer will process until it reaches this. + let stop_block = if blocks.is_empty() { + mock_chain::genesis_ptr() + } else { + mock_chain::final_block_ptr(&blocks).ok_or_else(|| anyhow!("No blocks to process"))? + }; + + info!(logger, "Starting subgraph indexing"; "stop_block" => stop_block.number); + + ctx.provider + .clone() + .start_subgraph(ctx.deployment.clone(), Some(stop_block.number)) + .await; + + let result = match wait_for_sync( + &logger, + ctx.store.clone(), + &ctx.deployment, + stop_block.clone(), + ) + .await + { + Ok(()) => run_assertions(&ctx, &test_file.assertions).await, + Err(subgraph_error) => { + // The subgraph handler threw a fatal error during indexing. + // Report it as a test failure without running assertions. 
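+            // (A deterministic handler failure, e.g. an assert() or abort in
+            // the mapping, surfaces here via the fatal_error check in
+            // wait_for_sync below.)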
+ Ok(TestResult { + handler_error: Some(subgraph_error.message), + assertions: vec![], + }) + } + }; + + // Always stop the subgraph to ensure cleanup, even when wait_for_sync errors + ctx.provider + .clone() + .stop_subgraph(ctx.deployment.clone()) + .await; + + // For persistent databases, clean up the deployment after the test so the + // database is left in a clean state. Each run generates a unique hash and + // subgraph name (via the seed), so pre-test cleanup is not needed — only + // post-test cleanup of the current run's deployment. + if db.needs_cleanup() { + cleanup( + &ctx.store, + &manifest_info.subgraph_name, + &manifest_info.hash, + ) + .await + .ok(); + } + + result +} + +/// Create the database for this test run. +/// +/// Returns `Temporary` (pgtemp, auto-dropped) or `Persistent` (--postgres-url). +/// On non-Unix systems, `--postgres-url` is required. +fn create_test_database(opt: &TestOpt, build_dir: &Path) -> Result { + if let Some(url) = &opt.postgres_url { + return Ok(TestDatabase::Persistent { url: url.clone() }); + } + + #[cfg(unix)] + { + if !build_dir.exists() { + anyhow::bail!( + "Build directory does not exist: {}. Run 'gnd build' first.", + build_dir.display() + ); + } + + // pgtemp sets `unix_socket_directories` to the data dir by default. + // On macOS the temp dir path can exceed the 104-byte Unix socket limit + // (e.g. /private/var/folders/.../build/pgtemp-xxx/pg_data_dir/.s.PGSQL.PORT), + // causing postgres to silently fail to start. Override to /tmp so the + // socket path stays short. Different port numbers prevent conflicts. + let db = PgTempDBBuilder::new() + .with_data_dir_prefix(build_dir) + .persist_data(false) + .with_initdb_arg("-E", "UTF8") + .with_initdb_arg("--locale", "C") + .with_config_param("unix_socket_directories", "/tmp") + .start(); + + let url = db.connection_uri().to_string(); + Ok(TestDatabase::Temporary { url, _handle: db }) + } + + #[cfg(not(unix))] + { + let _ = build_dir; + Err(anyhow!( + "On non-Unix systems, please provide --postgres-url" + )) + } +} + +/// Database used for a single test run. +enum TestDatabase { + #[cfg(unix)] + Temporary { + url: String, + _handle: pgtemp::PgTempDB, + }, + Persistent { + url: String, + }, +} + +impl TestDatabase { + fn url(&self) -> &str { + match self { + #[cfg(unix)] + Self::Temporary { url, .. } => url, + Self::Persistent { url } => url, + } + } + + /// Persistent databases accumulate state across test runs and need + /// explicit post-test cleanup to remove each run's deployment. + /// Temporary databases are dropped automatically — no cleanup needed. + fn needs_cleanup(&self) -> bool { + match self { + #[cfg(unix)] + Self::Temporary { .. } => false, + Self::Persistent { .. } => true, + } + } +} + +async fn setup_stores( + logger: &Logger, + db: &TestDatabase, + network_name: &ChainName, +) -> Result { + let config_str = format!( + r#" +[store] +[store.primary] +connection = "{}" +pool_size = 2 + +[deployment] +[[deployment.rule]] +store = "primary" +indexers = [ "default" ] + +[chains] +ingestor = "default" +"#, + db.url() + ); + + let config = Config::from_str(&config_str, "default") + .map_err(|e| anyhow!("Failed to parse config: {}", e))?; + + let mock_registry = Arc::new(MetricsRegistry::mock()); + let node_id = NodeId::new(NODE_ID).unwrap(); + + // StoreBuilder runs migrations and creates connection pools. 
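+    // (This is the same TOML shape as graph-node's config file, pared down to
+    // a single "primary" shard that points at the test database.)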
+ let store_builder = + StoreBuilder::new(logger, &node_id, &config, None, mock_registry.clone()).await; + + let chain_head_listener = store_builder.chain_head_update_listener(); + let network_identifiers: Vec = vec![network_name.clone()]; + let network_store = store_builder.network_store(network_identifiers).await; + + let block_store = network_store.block_store(); + + // Synthetic chain identifier — net_version "1" with zero genesis hash. + let ident = ChainIdentifier { + net_version: "1".into(), + genesis_block_hash: graph::prelude::alloy::primitives::B256::ZERO.into(), + }; + + let chain_store = block_store + .create_chain_store(network_name, ident) + .await + .context("Failed to create chain store")?; + + Ok(TestStores { + network_name: network_name.clone(), + chain_head_listener, + network_store, + chain_store, + }) +} + +/// Construct a mock Ethereum `Chain` with pre-built blocks. +/// +/// Uses `StaticStreamBuilder` for blocks, noops for unused adapters, +/// and a dummy firehose endpoint (never connected to). +async fn setup_chain( + logger: &Logger, + blocks: Vec>, + stores: &TestStores, +) -> Result> { + let mock_registry = Arc::new(MetricsRegistry::mock()); + let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); + + // Dummy firehose endpoint — required by Chain constructor but never used. + let firehose_endpoints = FirehoseEndpoints::for_testing(vec![Arc::new(FirehoseEndpoint::new( + "", + "http://0.0.0.0:0", + None, + None, + true, + false, + SubgraphLimit::Unlimited, + Arc::new(EndpointMetrics::mock()), + ))]); + + let client = + Arc::new(graph::blockchain::client::ChainClient::::new_firehose(firehose_endpoints)); + + let block_stream_builder: Arc> = + Arc::new(StaticStreamBuilder { chain: blocks }); + + // Create a dummy Ethereum adapter with archive capabilities. + // The adapter itself is never used for RPC — ethereum.call results come from + // the pre-populated call cache. But the RuntimeAdapter needs to resolve an + // adapter with matching capabilities before it can invoke the cache lookup. 
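+    // ("archive: true" advertises the ability to serve eth_call against
+    // historical blocks, which is what subgraph handlers expect.)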
+ let endpoint_metrics = Arc::new(EndpointMetrics::mock()); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + let transport = Transport::new_rpc( + graph::url::Url::parse("http://0.0.0.0:0").unwrap(), + graph::http::HeaderMap::new(), + endpoint_metrics.clone(), + "", + false, // no_eip2718 + graph_chain_ethereum::Compression::None, + ); + let dummy_adapter = Arc::new( + EthereumAdapter::new( + logger.clone(), + String::new(), + transport, + provider_metrics, + true, + false, + ) + .await, + ); + let adapter = EthereumNetworkAdapter::new( + endpoint_metrics, + NodeCapabilities { + archive: true, + traces: false, + }, + dummy_adapter, + SubgraphLimit::Unlimited, + ); + let provider_manager = ProviderManager::new( + logger.clone(), + vec![(stores.network_name.clone(), vec![adapter])], + ProviderCheckStrategy::MarkAsValid, + ); + let eth_adapters = Arc::new(EthereumNetworkAdapters::new( + stores.network_name.clone(), + provider_manager, + vec![], + None, + )); + + let chain = Chain::new( + logger_factory, + stores.network_name.clone(), + mock_registry, + stores.chain_store.cheap_clone(), + stores.chain_store.cheap_clone(), + client, + stores.chain_head_listener.cheap_clone(), + block_stream_builder, + Arc::new(StaticBlockRefetcher { + _phantom: PhantomData, + }), + Arc::new(NoopAdapterSelector { + _phantom: PhantomData, + }), + Arc::new(EthereumRuntimeAdapterBuilder {}), + eth_adapters, + graph::prelude::ENV_VARS.reorg_threshold(), + graph::prelude::ENV_VARS.ingestor_polling_interval, + true, + ); + + Ok(Arc::new(chain)) +} + +/// Wire up all graph-node components and deploy the subgraph. +async fn setup_context( + logger: &Logger, + stores: &TestStores, + chain: &Arc, + manifest_info: &ManifestInfo, +) -> Result { + let build_dir = &manifest_info.build_dir; + let manifest_path = &manifest_info.manifest_path; + let hash = manifest_info.hash.clone(); + let subgraph_name = manifest_info.subgraph_name.clone(); + let start_block_override = manifest_info.start_block_override.clone(); + + let env_vars = Arc::new(EnvVars::from_env().unwrap_or_default()); + let mock_registry = Arc::new(MetricsRegistry::mock()); + let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); + let node_id = NodeId::new(NODE_ID).unwrap(); + + let subgraph_store = stores.network_store.subgraph_store(); + + // Map the network name to our mock chain so graph-node routes triggers correctly. + let mut blockchain_map = BlockchainMap::new(); + blockchain_map.insert(stores.network_name.clone(), chain.clone()); + let blockchain_map = Arc::new(blockchain_map); + + // FileLinkResolver loads the manifest and WASM from the build directory + // instead of fetching from IPFS. The alias maps the Qm deployment hash to the + // actual manifest path so that clone_for_manifest can resolve it without + // treating the hash as a filesystem path. + let aliases = + std::collections::HashMap::from([(hash.to_string(), manifest_path.to_path_buf())]); + let link_resolver: Arc = Arc::new( + FileLinkResolver::new(Some(build_dir.to_path_buf()), aliases), + ); + + // IPFS client is required by the instance manager constructor but not used + // for manifest loading (FileLinkResolver handles that). 
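+    // (It only backs the polling monitor for file data sources; tests that
+    // don't use file data sources should never hit it.)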
+ let ipfs_metrics = IpfsMetrics::new(&mock_registry); + let ipfs_client = Arc::new( + IpfsRpcClient::new_unchecked(ServerAddress::test_rpc_api(), ipfs_metrics, logger) + .context("Failed to create IPFS client")?, + ); + + let ipfs_service = ipfs_service( + ipfs_client, + env_vars.mappings.max_ipfs_file_bytes, + env_vars.mappings.ipfs_timeout, + env_vars.mappings.ipfs_request_limit, + ); + + let arweave_resolver = Arc::new(ArweaveClient::default()); + let arweave_service = arweave_service( + arweave_resolver.cheap_clone(), + env_vars.mappings.ipfs_request_limit, + graph::components::link_resolver::FileSizeLimit::MaxBytes( + env_vars.mappings.max_ipfs_file_bytes as u64, + ), + ); + + let sg_count = Arc::new(SubgraphCountMetric::new(mock_registry.cheap_clone())); + let static_filters = env_vars.experimental_static_filters; + + let subgraph_instance_manager = Arc::new(graph_core::subgraph::SubgraphInstanceManager::< + SubgraphStore, + FlightClient, + >::new( + &logger_factory, + env_vars.cheap_clone(), + subgraph_store.clone(), + blockchain_map.clone(), + sg_count.cheap_clone(), + mock_registry.clone(), + link_resolver.cheap_clone(), + ipfs_service, + arweave_service, + None, + static_filters, + )); + + let mut subgraph_instance_managers = + graph_core::subgraph_provider::SubgraphInstanceManagers::new(); + subgraph_instance_managers.add( + graph_core::subgraph_provider::SubgraphProcessingKind::Trigger, + subgraph_instance_manager.cheap_clone(), + ); + + let subgraph_provider = Arc::new(graph_core::subgraph_provider::SubgraphProvider::new( + &logger_factory, + sg_count.cheap_clone(), + subgraph_store.clone(), + link_resolver.cheap_clone(), + tokio_util::sync::CancellationToken::new(), + subgraph_instance_managers, + )); + + let load_manager = LoadManager::new(logger, Vec::new(), Vec::new(), mock_registry.clone()); + let graphql_runner = Arc::new(GraphQlRunner::new( + logger, + stores.network_store.clone(), + Arc::new(load_manager), + mock_registry.clone(), + )); + + // The registrar handles subgraph naming and version management. + // Uses PanicSubscriptionManager because tests don't need GraphQL subscriptions. + let panicking_subscription_manager = Arc::new(PanicSubscriptionManager {}); + let subgraph_registrar = Arc::new(graph_core::subgraph::SubgraphRegistrar::new( + &logger_factory, + link_resolver.cheap_clone(), + subgraph_provider.cheap_clone(), + subgraph_store.clone(), + panicking_subscription_manager, + Option::>::None, + blockchain_map.clone(), + node_id.clone(), + SubgraphVersionSwitchingMode::Instant, + Arc::new(Settings::default()), + Arc::new(AmpChainNames::default()), + )); + + SubgraphRegistrar::create_subgraph(subgraph_registrar.as_ref(), subgraph_name.clone()).await?; + + // Deploy the subgraph version (loads manifest, compiles WASM, creates schema tables). + // start_block_override bypasses on-chain block validation when startBlock > 0. + let deployment = SubgraphRegistrar::create_subgraph_version( + subgraph_registrar.as_ref(), + subgraph_name.clone(), + hash.clone(), + node_id.clone(), + None, + start_block_override, + None, + None, + false, + ) + .await?; + + Ok(TestContext { + provider: subgraph_provider, + store: subgraph_store, + deployment, + graphql_runner, + }) +} + +/// Remove a subgraph deployment after a test run. Errors are ignored. 
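+///
+/// Only called for persistent databases (see `TestDatabase::needs_cleanup`);
+/// temporary pgtemp databases are dropped wholesale with their handle.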
+async fn cleanup( + subgraph_store: &SubgraphStore, + name: &SubgraphName, + hash: &DeploymentHash, +) -> Result<()> { + let locators = SubgraphStoreTrait::locators(subgraph_store, hash).await?; + + // Ignore errors - the subgraph might not exist on first run + let _ = subgraph_store.remove_subgraph(name.clone()).await; + + for locator in &locators { + // Unassign the deployment from its node first — remove_deployment + // silently skips deletion if the deployment is still assigned. + let _ = SubgraphStoreTrait::unassign_subgraph(subgraph_store, locator).await; + subgraph_store.remove_deployment(locator.id.into()).await?; + } + + Ok(()) +} + +/// Poll until the subgraph reaches `stop_block` or fails. +/// +/// Returns `Ok(())` on success or `Err(SubgraphError)` on fatal error or timeout. +async fn wait_for_sync( + logger: &Logger, + store: Arc, + deployment: &DeploymentLocator, + stop_block: BlockPtr, +) -> Result<(), SubgraphError> { + // NOTE: Hardcoded timeout/interval - could be made configurable via env var + // or CLI flag for slow subgraphs or faster iteration during development. + const MAX_WAIT: Duration = Duration::from_secs(60); + const WAIT_TIME: Duration = Duration::from_millis(500); + + let start = Instant::now(); + + async fn flush(logger: &Logger, store: &Arc, deployment: &DeploymentLocator) { + if let Ok(writable) = store + .clone() + .writable(logger.clone(), deployment.id, Arc::new(vec![])) + .await + { + let _ = writable.flush().await; + } + } + + flush(logger, &store, deployment).await; + + while start.elapsed() < MAX_WAIT { + tokio::time::sleep(WAIT_TIME).await; + flush(logger, &store, deployment).await; + + let block_ptr = match store.least_block_ptr(&deployment.hash).await { + Ok(Some(ptr)) => ptr, + _ => continue, // Not started yet + }; + + info!(logger, "Sync progress"; "current" => block_ptr.number, "target" => stop_block.number); + + // Check if the subgraph hit a fatal error (e.g., handler panic, deterministic error). + let status = store.status_for_id(deployment.id).await; + if let Some(fatal_error) = status.fatal_error { + return Err(fatal_error); + } + + if block_ptr.number >= stop_block.number { + info!(logger, "Reached stop block"); + return Ok(()); + } + } + + Err(SubgraphError { + subgraph_id: deployment.hash.clone(), + message: format!("Sync timeout after {}s", MAX_WAIT.as_secs()), + block_ptr: None, + handler: None, + deterministic: false, + }) +} diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs new file mode 100644 index 00000000000..fb254e340f9 --- /dev/null +++ b/gnd/src/commands/test/schema.rs @@ -0,0 +1,197 @@ +//! JSON schema types for test files and result types. +//! +//! Test files are JSON documents that describe a sequence of mock blockchain +//! blocks with triggers (log events) and GraphQL assertions to validate the +//! resulting entity state after indexing. Block triggers are auto-injected +//! for every block (both `Start` and `End` types) so block handlers with any +//! filter (`once`, `polling`, or none) fire correctly without explicit config. +//! +//! ```json +//! { +//! "name": "Transfer creates entity", +//! "blocks": [ +//! { +//! "number": 1, +//! "events": [ +//! { +//! "address": "0x1234...", +//! "event": "Transfer(address indexed from, address indexed to, uint256 value)", +//! "params": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "1000" } +//! } +//! ] +//! } +//! ], +//! "assertions": [ +//! { +//! "query": "{ transfer(id: \"1\") { from to value } }", +//! 
"expected": { "transfer": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "1000" } } +//! } +//! ] +//! } +//! ``` + +use serde::Deserialize; +use serde_json::Value; +use std::path::{Path, PathBuf}; + +/// Top-level test file. A named test case with mock blocks and GraphQL assertions. +#[derive(Debug, Clone, Deserialize)] +pub struct TestFile { + pub name: String, + + /// Ordered sequence of mock blocks to index. + #[serde(default)] + pub blocks: Vec, + + /// GraphQL assertions to run after indexing. + #[serde(default)] + pub assertions: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TestBlock { + /// Block number. If omitted, auto-increments starting from `start_block` + /// (default 1). Explicit numbers allow gaps (e.g., blocks 1, 5, 100). + #[serde(default)] + pub number: Option, + + /// Block hash as hex string (e.g., "0xabc..."). If omitted, generated + /// deterministically as `keccak256(block_number)`. + #[serde(default)] + pub hash: Option, + + /// Unix timestamp in seconds. If omitted, defaults to the block number + /// (monotonically increasing, chain-agnostic). + #[serde(default)] + pub timestamp: Option, + + /// Log events within this block. Block triggers (Start/End) are auto-injected. + #[serde(default)] + pub events: Vec, + + /// Mock contract call responses pre-cached before the test runs. + #[serde(default, rename = "ethCalls")] + pub eth_calls: Vec, +} + +/// A mock Ethereum event log. +#[derive(Debug, Clone, Deserialize)] +pub struct LogEvent { + /// Contract address that emitted the event (checksummed or lowercase hex). + pub address: String, + + /// Full event signature including parameter names and `indexed` keywords. + /// Example: `"Transfer(address indexed from, address indexed to, uint256 value)"` + /// + /// The signature is parsed to determine: + /// - topic0 (keccak256 hash of the canonical signature) + /// - Which parameters are indexed (become topics) vs non-indexed (become data) + pub event: String, + + /// Event parameter values keyed by name. Values are JSON strings/numbers + /// that get converted to the appropriate Solidity type: + /// - Addresses: hex string `"0x1234..."` + /// - Integers: string `"1000000000000000000"` or number `1000` + /// - Booleans: `true` / `false` + /// - Bytes: hex string `"0xdeadbeef"` + #[serde(default)] + pub params: serde_json::Map, + + /// Explicit tx hash, or generated as `keccak256(block_number || log_index)`. + #[serde(default)] + pub tx_hash: Option, +} + +/// A mock contract call response pre-cached for a specific block. +#[derive(Debug, Clone, Deserialize)] +pub struct MockEthCall { + pub address: String, + pub function: String, + pub params: Vec, + pub returns: Vec, + + #[serde(default)] + pub reverts: bool, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct Assertion { + pub query: String, + + /// Expected JSON result. String/number coercion is applied (BigInt/BigDecimal). + pub expected: Value, +} + +#[derive(Debug)] +pub struct TestResult { + pub handler_error: Option, + pub assertions: Vec, +} + +impl TestResult { + pub fn is_passed(&self) -> bool { + self.handler_error.is_none() + && self + .assertions + .iter() + .all(|a| matches!(a, AssertionOutcome::Passed { .. })) + } +} + +#[derive(Debug)] +pub enum AssertionOutcome { + Passed { query: String }, + Failed(AssertionFailure), +} + +#[derive(Debug)] +pub struct AssertionFailure { + pub query: String, + pub expected: Value, + pub actual: Value, +} + +/// Parse a JSON test file. NOTE: Only validates JSON schema, not semantic correctness. 
+pub fn parse_test_file(path: &Path) -> anyhow::Result { + let content = std::fs::read_to_string(path) + .map_err(|e| anyhow::anyhow!("Failed to read test file {}: {}", path.display(), e))?; + serde_json::from_str(&content) + .map_err(|e| anyhow::anyhow!("Failed to parse test file {}: {}", path.display(), e)) +} + +/// Discover `*.json` / `*.test.json` test files in a directory (recursive). Skips entries starting with non-alphanumeric characters. +pub fn discover_test_files(dir: &Path) -> anyhow::Result> { + let mut files = Vec::new(); + + if !dir.exists() { + return Ok(files); + } + + discover_recursive(dir, &mut files)?; + files.sort(); + Ok(files) +} + +fn discover_recursive(dir: &Path, files: &mut Vec) -> anyhow::Result<()> { + for entry in std::fs::read_dir(dir)? { + let entry = entry?; + let path = entry.path(); + let name = match path.file_name().and_then(|n| n.to_str()) { + Some(n) => n, + None => continue, + }; + + // Skip entries whose name starts with a non-alphanumeric character. + if !name.starts_with(|c: char| c.is_alphanumeric()) { + continue; + } + + if path.is_dir() { + discover_recursive(&path, files)?; + } else if path.is_file() && (name.ends_with(".test.json") || name.ends_with(".json")) { + files.push(path); + } + } + + Ok(()) +} diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs new file mode 100644 index 00000000000..98396538a92 --- /dev/null +++ b/gnd/src/commands/test/trigger.rs @@ -0,0 +1,441 @@ +//! ABI encoding of JSON test triggers into graph-node's Ethereum trigger types. +//! +//! For `Transfer(address indexed from, address indexed to, uint256 value)`: +//! - topic0 = keccak256("Transfer(address,address,uint256)") +//! - topic1 = left-padded `from` address (indexed) +//! - topic2 = left-padded `to` address (indexed) +//! - data = ABI-encoded `value` (non-indexed) + +use super::schema::{LogEvent, TestFile}; +use anyhow::{anyhow, ensure, Context, Result}; +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::prelude::alloy::dyn_abi::{DynSolType, DynSolValue}; +use graph::prelude::alloy::json_abi::Event; +use graph::prelude::alloy::primitives::{keccak256, Address, Bytes, B256, I256, U256}; +use graph::prelude::alloy::rpc::types::Log; +use graph::prelude::{BlockPtr, LightEthereumBlock}; +use graph_chain_ethereum::chain::BlockFinality; +use graph_chain_ethereum::trigger::{EthereumBlockTriggerType, EthereumTrigger, LogRef}; +use graph_chain_ethereum::Chain; +use std::sync::Arc; + +/// Convert test blocks into `BlockWithTriggers`, chained by parent hash. +/// Block numbers auto-increment from `start_block` when not explicit. +pub fn build_blocks_with_triggers( + test_file: &TestFile, + start_block: u64, +) -> Result>> { + let mut blocks = Vec::new(); + let mut current_number = start_block; + let mut parent_hash = B256::ZERO; + + for test_block in &test_file.blocks { + let number = test_block.number.unwrap_or(current_number); + + let hash = test_block + .hash + .as_ref() + .map(|h| h.parse::()) + .transpose() + .context("Invalid block hash")? + .unwrap_or_else(|| keccak256(number.to_be_bytes())); + + // Default: use block number as timestamp (seconds since epoch). + // Avoids assuming a chain-specific block time and prevents future timestamps + // on chains with high block numbers (e.g. Arbitrum). 
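+        // e.g. block 17_000_000 gets timestamp 17_000_000, which is mid-1970
+        // in Unix time and therefore always safely in the past.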
+ let timestamp = test_block.timestamp.unwrap_or(number); + + let mut triggers = Vec::new(); + + for (log_index, log_event) in test_block.events.iter().enumerate() { + let eth_trigger = build_log_trigger(number, hash, log_index as u64, log_event)?; + triggers.push(eth_trigger); + } + + // Auto-inject block triggers for every block so that block handlers + // with any filter fire correctly: + // - Start: matches `once` handlers (at start_block) and initialization handlers + // - End: matches unfiltered and `polling` handlers + ensure!( + number <= i32::MAX as u64, + "block number {} exceeds i32::MAX", + number + ); + let block_ptr = BlockPtr::new(hash.into(), number as i32); + triggers.push(EthereumTrigger::Block( + block_ptr.clone(), + EthereumBlockTriggerType::Start, + )); + triggers.push(EthereumTrigger::Block( + block_ptr, + EthereumBlockTriggerType::End, + )); + + let block = create_block_with_triggers(number, hash, parent_hash, timestamp, triggers)?; + blocks.push(block); + + parent_hash = hash; + current_number = number + 1; + } + + Ok(blocks) +} + +/// Build an `EthereumTrigger::Log` from a test JSON event. +fn build_log_trigger( + block_number: u64, + block_hash: B256, + log_index: u64, + trigger: &LogEvent, +) -> Result { + let address: Address = trigger + .address + .parse() + .context("Invalid contract address")?; + + let (topics, data) = encode_event_log(&trigger.event, &trigger.params)?; + + // Generate deterministic tx hash if not provided: keccak256(block_number || log_index). + let tx_hash = trigger + .tx_hash + .as_ref() + .map(|h| h.parse::()) + .transpose() + .context("Invalid tx hash")? + .unwrap_or_else(|| { + keccak256([block_number.to_be_bytes(), log_index.to_be_bytes()].concat()) + }); + + let inner_log = graph::prelude::alloy::primitives::Log { + address, + data: graph::prelude::alloy::primitives::LogData::new_unchecked(topics, data), + }; + + let full_log = Arc::new(Log { + inner: inner_log, + block_hash: Some(block_hash), + block_number: Some(block_number), + block_timestamp: None, + transaction_hash: Some(tx_hash), + transaction_index: Some(0), + log_index: Some(log_index), + removed: false, + }); + + Ok(EthereumTrigger::Log(LogRef::FullLog(full_log, None))) +} + +/// Encode event parameters into EVM log topics and data using `alloy::json_abi::Event::parse()`. +/// +/// Given a human-readable event signature like: +/// `"Transfer(address indexed from, address indexed to, uint256 value)"` +/// and parameter values like: +/// `{"from": "0xaaaa...", "to": "0xbbbb...", "value": "1000"}` +/// +/// Produces: +/// - topics[0] = keccak256("Transfer(address,address,uint256)") (the event selector) +/// - topics[1] = left-padded `from` address (indexed) +/// - topics[2] = left-padded `to` address (indexed) +/// - data = ABI-encoded `value` as uint256 (non-indexed) +/// +/// Indexed parameters become topics (max 3 after topic0), non-indexed parameters +/// are ABI-encoded together as the log data. +pub fn encode_event_log( + event_sig: &str, + params: &serde_json::Map, +) -> Result<(Vec, Bytes)> { + // Event::parse expects "event EventName(...)" format. + // If the user already wrote "event Transfer(...)" use as-is, + // otherwise prepend "event ". 
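+    // e.g. "Transfer(address indexed from, address indexed to, uint256 value)"
+    // becomes "event Transfer(address indexed from, address indexed to, uint256 value)".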
+ let sig_with_prefix = if event_sig.trim_start().starts_with("event ") { + event_sig.to_string() + } else { + format!("event {}", event_sig) + }; + + let event = Event::parse(&sig_with_prefix) + .map_err(|e| anyhow!("Failed to parse event signature '{}': {:?}", event_sig, e))?; + + let topic0 = event.selector(); + let mut topics = vec![topic0]; + let mut data_values = Vec::new(); + + for input in &event.inputs { + let value = params + .get(&input.name) + .ok_or_else(|| anyhow!("Missing parameter: {}", input.name))?; + + let sol_type: DynSolType = input + .ty + .parse() + .map_err(|e| anyhow!("Invalid type '{}': {:?}", input.ty, e))?; + + let sol_value = json_to_sol_value(&sol_type, value)?; + + if input.indexed { + let topic = sol_value_to_topic(&sol_value)?; + topics.push(topic); + } else { + data_values.push(sol_value); + } + } + + let data = if data_values.is_empty() { + Bytes::new() + } else { + let tuple = DynSolValue::Tuple(data_values); + Bytes::from(tuple.abi_encode_params()) + }; + + Ok((topics, data)) +} + +/// Convert a JSON value to the corresponding Solidity type. +/// Supports: address, uint/int (decimal or hex string), bool, bytes, string, bytes1-32, arrays, tuples. +pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Result { + match sol_type { + DynSolType::Address => { + let s = value + .as_str() + .ok_or_else(|| anyhow!("Expected string for address"))?; + let addr: Address = s.parse().context("Invalid address")?; + Ok(DynSolValue::Address(addr)) + } + DynSolType::Uint(bits) => { + let n = match value { + serde_json::Value::String(s) => { + let (digits, radix) = match s.strip_prefix("0x") { + Some(hex) => (hex, 16), + None => (s.as_str(), 10), + }; + U256::from_str_radix(digits, radix).context("Invalid uint")? + } + // JSON numbers are limited to u64 range — use strings for larger values. + serde_json::Value::Number(n) => U256::from(n.as_u64().ok_or_else(|| { + anyhow!("uint value {} does not fit in u64, use a string instead", n) + })?), + _ => return Err(anyhow!("Expected string or number for uint")), + }; + Ok(DynSolValue::Uint(n, *bits)) + } + DynSolType::Int(bits) => { + let n = match value { + serde_json::Value::String(s) => { + let (is_neg, s_abs) = match s.strip_prefix('-') { + Some(rest) => (true, rest), + None => (false, s.as_str()), + }; + let (digits, radix) = match s_abs.strip_prefix("0x") { + Some(hex) => (hex, 16), + None => (s_abs, 10), + }; + let abs = U256::from_str_radix(digits, radix).context("Invalid int")?; + if is_neg { + !abs + U256::from(1) + } else { + abs + } + } + serde_json::Value::Number(n) => { + if let Some(i) = n.as_i64() { + // into_raw() gives the two's complement U256 representation. + // Handles i64::MIN correctly (unlike `-i as u64` which overflows). + I256::try_from(i).unwrap().into_raw() + } else { + U256::from(n.as_u64().ok_or_else(|| { + anyhow!( + "int value {} not representable as u64, use a string instead", + n + ) + })?) 
+ } + } + _ => return Err(anyhow!("Expected string or number for int")), + }; + Ok(DynSolValue::Int(I256::from_raw(n), *bits)) + } + DynSolType::Bool => { + let b = value.as_bool().ok_or_else(|| anyhow!("Expected bool"))?; + Ok(DynSolValue::Bool(b)) + } + DynSolType::Bytes => { + let s = value + .as_str() + .ok_or_else(|| anyhow!("Expected string for bytes"))?; + let hex_str = s.strip_prefix("0x").unwrap_or(s); + let bytes = hex::decode(hex_str).context("Invalid hex")?; + Ok(DynSolValue::Bytes(bytes)) + } + DynSolType::String => { + let s = value.as_str().ok_or_else(|| anyhow!("Expected string"))?; + Ok(DynSolValue::String(s.to_string())) + } + DynSolType::FixedBytes(len) => { + let s = value + .as_str() + .ok_or_else(|| anyhow!("Expected string for bytes{}", len))?; + let hex_str = s.strip_prefix("0x").unwrap_or(s); + let bytes = hex::decode(hex_str).context("Invalid hex")?; + if bytes.len() > *len { + return Err(anyhow!( + "bytes{}: got {} bytes, expected at most {}", + len, + bytes.len(), + len + )); + } + // DynSolValue::FixedBytes always wraps a B256 (32 bytes) plus the actual + // byte count. Right-zero-pad the input to fill the full 32 bytes. + let mut padded = [0u8; 32]; + padded[..bytes.len()].copy_from_slice(&bytes); + Ok(DynSolValue::FixedBytes(B256::from(padded), *len)) + } + DynSolType::Array(inner) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for array type"))?; + let elements: Vec = arr + .iter() + .map(|elem| json_to_sol_value(inner, elem)) + .collect::>()?; + Ok(DynSolValue::Array(elements)) + } + DynSolType::FixedArray(inner, size) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for fixed array type"))?; + ensure!( + arr.len() == *size, + "Fixed array expects {} elements, got {}", + size, + arr.len() + ); + let elements: Vec = arr + .iter() + .map(|elem| json_to_sol_value(inner, elem)) + .collect::>()?; + Ok(DynSolValue::FixedArray(elements)) + } + DynSolType::Tuple(types) => { + let arr = value + .as_array() + .ok_or_else(|| anyhow!("Expected JSON array for tuple type (positional)"))?; + ensure!( + arr.len() == types.len(), + "Tuple expects {} elements, got {}", + types.len(), + arr.len() + ); + let values: Vec = types + .iter() + .zip(arr.iter()) + .map(|(ty, val)| json_to_sol_value(ty, val)) + .collect::>()?; + Ok(DynSolValue::Tuple(values)) + } + _ => Err(anyhow!("Unsupported type: {:?}", sol_type)), + } +} + +/// Encode a Solidity value as a 32-byte EVM log topic. +fn sol_value_to_topic(value: &DynSolValue) -> Result { + match value { + DynSolValue::Address(addr) => { + let mut bytes = [0u8; 32]; + bytes[12..].copy_from_slice(addr.as_slice()); + Ok(B256::from(bytes)) + } + DynSolValue::Uint(n, _) => Ok(B256::from(*n)), + DynSolValue::Int(n, _) => Ok(B256::from(n.into_raw())), + DynSolValue::Bool(b) => { + let mut bytes = [0u8; 32]; + if *b { + bytes[31] = 1; + } + Ok(B256::from(bytes)) + } + DynSolValue::FixedBytes(b, _) => Ok(*b), + // Dynamic types are hashed per Solidity spec — the original value cannot be recovered from the topic. + DynSolValue::Bytes(b) => Ok(keccak256(b)), + DynSolValue::String(s) => Ok(keccak256(s.as_bytes())), + _ => Err(anyhow!("Cannot convert {:?} to topic", value)), + } +} + +/// Create a dummy transaction for block transaction lists. +/// Graph-node requires matching transactions for log processing. 
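+///
+/// (When a mapping reads `event.transaction`, graph-node resolves it by
+/// looking up the log's `transaction_hash` in the block's transaction list,
+/// so every hash referenced by a trigger needs a placeholder entry.)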
+fn dummy_transaction( + block_number: u64, + block_hash: B256, + transaction_index: u64, + transaction_hash: B256, +) -> graph::prelude::alloy::rpc::types::Transaction { + use graph::prelude::alloy::consensus::transaction::Recovered; + use graph::prelude::alloy::consensus::{Signed, TxEnvelope, TxLegacy}; + use graph::prelude::alloy::primitives::{Address, Signature, U256}; + use graph::prelude::alloy::rpc::types::Transaction; + + let signed = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::from(1), U256::from(1), false), + transaction_hash, + ); + + Transaction { + inner: Recovered::new_unchecked(TxEnvelope::Legacy(signed), Address::ZERO), + block_hash: Some(block_hash), + block_number: Some(block_number), + transaction_index: Some(transaction_index), + effective_gas_price: None, + } +} + +fn create_block_with_triggers( + number: u64, + hash: B256, + parent_hash: B256, + timestamp: u64, + triggers: Vec, +) -> Result> { + use graph::prelude::alloy::consensus::Header as ConsensusHeader; + use graph::prelude::alloy::rpc::types::{Block, BlockTransactions, Header}; + use std::collections::HashSet; + + // Collect unique transaction hashes from log triggers. + let mut tx_hashes: HashSet = HashSet::new(); + for trigger in &triggers { + if let EthereumTrigger::Log(LogRef::FullLog(log, _)) = trigger { + if let Some(tx_hash) = log.transaction_hash { + tx_hashes.insert(tx_hash); + } + } + } + + let transactions: Vec<_> = tx_hashes + .into_iter() + .enumerate() + .map(|(idx, tx_hash)| dummy_transaction(number, hash, idx as u64, tx_hash)) + .collect(); + + let alloy_block = Block::empty(Header { + hash, + inner: ConsensusHeader { + number, + parent_hash, + timestamp, + ..Default::default() + }, + total_difficulty: None, + size: None, + }) + .with_transactions(BlockTransactions::Full(transactions)); + + let light_block = LightEthereumBlock::new(alloy_block.into()); + let finality_block = BlockFinality::Final(Arc::new(light_block)); + + Ok(BlockWithTriggers::new( + finality_block, + triggers, + &graph::log::logger(false), + )) +} diff --git a/gnd/src/compiler/asc.rs b/gnd/src/compiler/asc.rs index 6f838b723de..d70da30322b 100644 --- a/gnd/src/compiler/asc.rs +++ b/gnd/src/compiler/asc.rs @@ -38,17 +38,19 @@ const REQUIRED_ASC_VERSION: &str = "0.19.23"; /// /// Requires asc version 0.19.23 to be installed. pub fn compile_mapping(options: &AscCompileOptions) -> Result<()> { - // Check that asc is available - if !is_asc_available() { - return Err(anyhow!( - "AssemblyScript compiler (asc) not found. Please install it with:\n \ - npm install -g assemblyscript@{REQUIRED_ASC_VERSION}" - )); - } + // Resolve the asc binary, checking global PATH and local node_modules/.bin + let asc_bin = find_asc_binary(&options.base_dir).ok_or_else(|| { + anyhow!( + "AssemblyScript compiler (asc) not found. 
+            npm install -g assemblyscript@{REQUIRED_ASC_VERSION}\n    \
+            or locally:\n    \
+            npm install --save-dev assemblyscript@{REQUIRED_ASC_VERSION}"
+        )
+    })?;
 
     // Check version unless explicitly skipped
     if !options.skip_version_check {
-        let version = get_asc_version()?;
+        let version = get_asc_version(&asc_bin)?;
         if version != REQUIRED_ASC_VERSION {
             return Err(anyhow!(
                 "AssemblyScript compiler version mismatch: found {}, required {}.\n    \
@@ -76,7 +78,7 @@ pub fn compile_mapping(options: &AscCompileOptions) -> Result<()> {
         .unwrap_or(&options.output_file);
 
     // Build the asc command
-    let mut cmd = Command::new("asc");
+    let mut cmd = Command::new(&asc_bin);
 
     // Add compiler flags matching graph-cli behavior
     cmd.arg("--explicitStart")
@@ -177,18 +179,31 @@ pub fn find_graph_ts(source_dir: &Path) -> Result<(Vec<PathBuf>, PathBuf)> {
     Ok((lib_dirs, global_file))
 }
 
-/// Check if the asc compiler is available.
-fn is_asc_available() -> bool {
-    Command::new("asc")
+/// Find the `asc` binary by checking the global PATH first, then the project's
+/// root `node_modules/.bin/asc`.
+fn find_asc_binary(base_dir: &Path) -> Option<PathBuf> {
+    // Check global PATH first
+    if Command::new("asc")
         .arg("--version")
         .output()
         .map(|o| o.status.success())
         .unwrap_or(false)
+    {
+        return Some(PathBuf::from("asc"));
+    }
+
+    // Backward compatibility with graph-cli: check local node_modules/.bin/asc
+    let local_asc = base_dir.join("node_modules").join(".bin").join("asc");
+    if local_asc.exists() {
+        return Some(local_asc);
+    }
+
+    None
 }
 
 /// Get the asc compiler version.
-fn get_asc_version() -> Result<String> {
-    let output = Command::new("asc")
+fn get_asc_version(asc_bin: &Path) -> Result<String> {
+    let output = Command::new(asc_bin)
         .arg("--version")
         .output()
         .context("Failed to execute asc --version")?;
@@ -244,11 +259,13 @@ mod tests {
     #[test]
     fn test_asc_version_check() {
         // Skip if asc is not installed
-        if !is_asc_available() {
-            return;
-        }
+        let temp_dir = TempDir::new().unwrap();
+        let asc_bin = match find_asc_binary(temp_dir.path()) {
+            Some(bin) => bin,
+            None => return,
+        };
 
-        let version = get_asc_version().unwrap();
+        let version = get_asc_version(&asc_bin).unwrap();
         // Version should be a semver-like string (e.g., "0.19.23")
         assert!(
             version.split('.').count() >= 2,
diff --git a/gnd/src/main.rs b/gnd/src/main.rs
index 24384bb0158..351dd9d220a 100644
--- a/gnd/src/main.rs
+++ b/gnd/src/main.rs
@@ -177,7 +177,7 @@ async fn main() -> Result<()> {
         Commands::Remove(remove_opt) => run_remove(remove_opt).await,
         Commands::Auth(auth_opt) => run_auth(auth_opt),
         Commands::Publish(publish_opt) => run_publish(publish_opt).await,
-        Commands::Test(test_opt) => run_test(test_opt),
+        Commands::Test(test_opt) => run_test(test_opt).await,
         Commands::Clean(clean_opt) => run_clean(clean_opt),
         Commands::Completions(completions_opt) => generate_completions(completions_opt),
     };
diff --git a/gnd/src/manifest.rs b/gnd/src/manifest.rs
index 5f37df3418f..947eff5fd12 100644
--- a/gnd/src/manifest.rs
+++ b/gnd/src/manifest.rs
@@ -68,6 +68,10 @@ pub struct DataSource {
     pub source_address: Option<String>,
     /// The ABI name referenced in `source.abi` (Ethereum data sources only).
     pub source_abi: Option<String>,
+    /// The block number at which this data source starts indexing (from source.startBlock).
+    pub start_block: u64,
+    /// The block number at which this data source stops indexing (from source.endBlock).
+    pub end_block: Option<u64>,
     /// Event handlers from the mapping.
     pub event_handlers: Vec<EventHandler>,
     /// Call handlers from the mapping.
@@ -228,6 +232,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { .collect(), source_address: eth.source.address.map(|a| format!("{:?}", a)), source_abi: Some(eth.source.abi.clone()), + start_block: eth.source.start_block as u64, + end_block: eth.source.end_block.map(|b| b as u64), event_handlers: eth .mapping .event_handlers @@ -277,6 +283,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { .collect(), source_address: Some(sub.source.address().to_string()), source_abi: None, + start_block: sub.source.start_block() as u64, + end_block: None, // Subgraph sources don't have end_block event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], @@ -290,6 +298,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { abis: vec![], source_address: None, source_abi: None, + start_block: 0, // Offchain data sources don't have start_block + end_block: None, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], @@ -303,6 +313,8 @@ fn convert_data_source(ds: GraphUnresolvedDS) -> DataSource { abis: vec![], source_address: None, source_abi: None, + start_block: amp.source.start_block.unwrap_or(0), + end_block: amp.source.end_block, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], diff --git a/gnd/src/validation/mod.rs b/gnd/src/validation/mod.rs index 11832b0eb23..b64c9f41398 100644 --- a/gnd/src/validation/mod.rs +++ b/gnd/src/validation/mod.rs @@ -1181,6 +1181,8 @@ type Post @entity { abis: vec![], source_address: None, source_abi: None, + start_block: 0, + end_block: None, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], @@ -1197,6 +1199,8 @@ type Post @entity { abis: vec![], source_address: Some(address.to_string()), source_abi: None, + start_block: 0, + end_block: None, event_handlers: vec![], call_handlers: vec![], block_handlers: vec![], diff --git a/gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json b/gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json new file mode 100644 index 00000000000..405d6b36486 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json @@ -0,0 +1,222 @@ +[ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_spender", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_from", + "type": "address" + }, + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "decimals", + "outputs": [ + { + "name": "", + "type": "uint8" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "name": "balance", + "type": 
"uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + }, + { + "name": "_spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "payable": true, + "stateMutability": "payable", + "type": "fallback" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "from", + "type": "address" + }, + { + "indexed": true, + "name": "to", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + } +] diff --git a/gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json b/gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json new file mode 100644 index 00000000000..d23f24c8ae2 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json @@ -0,0 +1,28 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "addr", + "type": "address" + } + ], + "name": "FactoryTokenCreated", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "addr", + "type": "address" + } + ], + "name": "createToken", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/gnd/tests/fixtures/gnd_test/subgraph/package.json b/gnd/tests/fixtures/gnd_test/subgraph/package.json new file mode 100644 index 00000000000..96118f38494 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/package.json @@ -0,0 +1,19 @@ +{ + "name": "gnd-example-subgraph", + "version": "0.1.0", + "private": true, + "scripts": { + "auth": "graph auth https://api.thegraph.com/deploy/", + "codegen": "graph codegen", + "build": "graph build", + "deploy": "graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ graphprotocol/erc20-subgraph", + "create-local": "graph create --node http://localhost:8020/ graphprotocol/erc20-subgraph", + "remove-local": "graph remove --node http://localhost:8020/ graphprotocol/erc20-subgraph", + "deploy-local": "graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 graphprotocol/erc20-subgraph" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.98.1", + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" + } +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql b/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql new file mode 100644 index 00000000000..f665f3118f9 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql @@ -0,0 +1,33 @@ +type 
Account @entity(immutable: false) { + id: Bytes! # Address + balances: [Balance!]! @derivedFrom(field: "account") +} + +type Token @entity(immutable: true) { + id: Bytes! # Address + name: String! + symbol: String! + decimals: Int! +} + +type Balance @entity(immutable: false) { + id: Bytes! # Account address + token address + token: Token! + account: Account! + amount: BigInt +} + +type Block @entity(immutable: true) { + id: Bytes! + number: BigInt! +} + +type OnceBlock @entity(immutable: true) { + id: Bytes! + msg: String! +} + +type PollingBlock @entity(immutable: true) { + id: Bytes! + number: BigInt! +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts b/gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts new file mode 100644 index 00000000000..f9a271681ad --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts @@ -0,0 +1,23 @@ +import { Bytes, ethereum, log } from "@graphprotocol/graph-ts"; +import {Block, OnceBlock, PollingBlock} from "../generated/schema"; + +export function handleEveryBlock(block: ethereum.Block): void { + log.warning("Handling every block event for block number: {}", [block.number.toString()]); + let blockEntity = new Block(block.hash); + blockEntity.number = block.number; + blockEntity.save(); +} + +export function handleOnce(block: ethereum.Block): void { + log.warning("Handling a one-time block event for block number: {}", [block.number.toString()]); + let blockOnceEntity = new OnceBlock(block.hash.concat(Bytes.fromUTF8("-once"))); + blockOnceEntity.msg = "This is a one-time block entity"; + blockOnceEntity.save(); +} + +export function handlePolling(block: ethereum.Block): void { + log.warning("Handling a polling block event for block number: {}", [block.number.toString()]); + let blockEntity = new PollingBlock(block.hash); + blockEntity.number = block.number; + blockEntity.save(); +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts b/gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts new file mode 100644 index 00000000000..9b00e6a0b74 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts @@ -0,0 +1,21 @@ +import { FactoryTokenCreated } from '../generated/Factory/TokenFactory' +import { Token } from '../generated/schema' +import { FactoryToken } from '../generated/templates'; +import { ERC20 } from '../generated/templates/FactoryToken/ERC20'; + +export function handleTokenCreated(event: FactoryTokenCreated): void { + FactoryToken.create(event.params.addr); + let token = new Token(event.params.addr); + + // Try to fetch token details using ERC20 interface + let erc20 = ERC20.bind(event.params.addr); + let nameResult = erc20.try_name(); + let symbolResult = erc20.try_symbol(); + let decimalsResult = erc20.try_decimals(); + + // If the calls revert, we can set default values + token.name = nameResult.reverted ? "Unknown" : nameResult.value; + token.symbol = symbolResult.reverted ? "Unknown" : symbolResult.value; + token.decimals = decimalsResult.reverted ? 
18 : decimalsResult.value; + token.save(); +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/token.ts b/gnd/tests/fixtures/gnd_test/subgraph/src/token.ts new file mode 100644 index 00000000000..21b09f43de6 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/src/token.ts @@ -0,0 +1,86 @@ +import { Transfer } from '../generated/StandardToken/ERC20' +import { Token, Account, Balance } from '../generated/schema' +import { ERC20 } from '../generated/StandardToken/ERC20'; +import { log } from "@graphprotocol/graph-ts"; + +export function handleTransfer(event: Transfer): void { + let token = Token.load(event.address); + let from = Account.load(event.params.from); + let to = Account.load(event.params.to); + let balanceFrom = Balance.load(event.params.from.concat(event.address)); + let balanceTo = Balance.load(event.params.to.concat(event.address)); + + let erc20 = ERC20.bind(event.address); + + if (!token) { + token = new Token(event.address); + + // Try to fetch token details using ERC20 interface + let nameResult = erc20.try_name(); + let symbolResult = erc20.try_symbol(); + let decimalsResult = erc20.try_decimals(); + + // If the calls revert, we can set default values + token.name = nameResult.reverted ? "Unknown" : nameResult.value; + token.symbol = symbolResult.reverted ? "Unknown" : symbolResult.value; + token.decimals = decimalsResult.reverted ? 18 : decimalsResult.value; + token.save(); + } + + if (!from) { + from = new Account(event.params.from); + from.save(); + } + + if (!to) { + to = new Account(event.params.to); + to.save(); + } + + if (!balanceFrom) { + log.warning("Balance for account {} and token {} does not exist. Creating new balance entity.", [event.params.from.toHexString(), event.address.toHexString()]); + balanceFrom = new Balance(event.params.from.concat(event.address)); + balanceFrom.account = event.params.from; + balanceFrom.token = event.address; + let balanceFromResult = erc20.try_balanceOf(event.params.from); + balanceFrom.amount = balanceFromResult.reverted ? null : balanceFromResult.value; + } else { + log.warning("Balance for account {} and token {} already exists. Updating balance based on transfer value.", [event.params.from.toHexString(), event.address.toHexString()]); + // If the balance already exists, we need to update it based on the transfer value + // We will fetch the current balance and then update it after the transfer + let currentBalance = balanceFrom.amount; + + if (currentBalance) { + let newBalance = currentBalance.minus(event.params.value); + balanceFrom.amount = newBalance; + } else { + // Try to fetch the balance if it was not previously set + let balanceFromResult = erc20.try_balanceOf(event.params.from); + balanceFrom.amount = balanceFromResult.reverted ? null : balanceFromResult.value; + } + } + + if (!balanceTo) { + balanceTo = new Balance(event.params.to.concat(event.address)); + balanceTo.account = event.params.to; + balanceTo.token = event.address; + let balanceToResult = erc20.try_balanceOf(event.params.to); + balanceTo.amount = balanceToResult.reverted ? 
null : balanceToResult.value; + } else { + // If the balance already exists, we need to update it based on the transfer value + // We will fetch the current balance and then update it after the transfer + let currentBalance = balanceTo.amount; + + if (currentBalance) { + let newBalance = currentBalance.plus(event.params.value); + balanceTo.amount = newBalance; + } else { + // Try to fetch the balance if it was not previously set + let balanceToResult = erc20.try_balanceOf(event.params.to); + balanceTo.amount = balanceToResult.reverted ? null : balanceToResult.value; + } + } + + balanceFrom.save(); + balanceTo.save(); +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml b/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml new file mode 100644 index 00000000000..ec4aff9cfe5 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml @@ -0,0 +1,124 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - name: StandardToken + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + address: "0x731a10897d267e19b34503ad902d0a29173ba4b1" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/token.ts + entities: + - TransferEvent + abis: + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransfer + - name: EveryBlock + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - Block + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handleEveryBlock + - name: BlockOnce + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + startBlock: 1000 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - BlockOnce + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handleOnce + filter: + kind: once + - name: BlockPolling + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - PollingBlock + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handlePolling + filter: + kind: polling + every: 5 + - name: Factory + kind: ethereum/contract + network: arbitrum + source: + abi: TokenFactory + address: "0x0000000000000000000000000000000000000001" + startBlock: 2000 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/factory.ts + entities: + - FactoryToken + abis: + - name: TokenFactory + file: ./abis/TokenFactory.json + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: FactoryTokenCreated(indexed address) + handler: handleTokenCreated +templates: + - name: FactoryToken + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/token.ts + entities: + - FactoryTokenTransfer + abis: + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransfer diff --git 
a/gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json b/gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json new file mode 100644 index 00000000000..8065b6aa398 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json @@ -0,0 +1,43 @@ +{ + "name": "Example Block Handler Testing", + "blocks": [ + {"number": 1000}, + {"number": 1001}, + {"number": 1002}, + {"number": 1003}, + {"number": 1004}, + {"number": 1005} + ], + "assertions": [ + { + "query": "{ blocks { number } }", + "expected": { + "blocks": [ + {"number": "1000"}, + {"number": "1001"}, + {"number": "1002"}, + {"number": "1003"}, + {"number": "1004"}, + {"number": "1005"} + ] + } + }, + { + "query": "{ onceBlocks { msg } }", + "expected": { + "onceBlocks": [ + {"msg": "This is a one-time block entity"} + ] + } + }, + { + "query": "{ pollingBlocks { number } }", + "expected": { + "pollingBlocks": [ + {"number":"1000"}, + {"number": "1005"} + ] + } + } + ] +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json b/gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json new file mode 100644 index 00000000000..e9970588221 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json @@ -0,0 +1,12 @@ +{ + "name": "Deliberately failing test (wrong expected value)", + "blocks": [ + {} + ], + "assertions": [ + { + "query": "{ tokens { id name symbol } }", + "expected": { "tokens": [ { "id": "0x731a10897d267e19b34503ad902d0a29173ba4b1", "name": "TheGraph", "symbol": "GRT" } ] } + } + ] +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json b/gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json new file mode 100644 index 00000000000..dc78dfc4c28 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json @@ -0,0 +1,80 @@ +{ + "name": "Test dynamic datasources support", + "blocks": [ + { + "number": 2001, + "events": [ + { + "address": "0x0000000000000000000000000000000000000001", + "event": "FactoryTokenCreated(address indexed addr)", + "params": { + "addr": "0x0000000000000000000000000000000000000002" + } + } + ], + "ethCalls": [ + { + "address": "0x0000000000000000000000000000000000000002", + "function": "symbol()(string)", + "params": [], + "returns": ["FT1"] + }, + { + "address": "0x0000000000000000000000000000000000000002", + "function": "name()(string)", + "params": [], + "returns": ["FactoryToken1"] + }, + { + "address": "0x0000000000000000000000000000000000000002", + "function": "decimals()(uint8)", + "params": [], + "returns": ["18"] + } + ] + }, + { + "number": 2002, + "events": [ + { + "address": "0x0000000000000000000000000000000000000002", + "event": "Transfer(address indexed from, address indexed to, uint256 value)", + "params": { + "from": "0xaaaa000000000000000000000000000000000000", + "to": "0xbbbb000000000000000000000000000000000000", + "value": "1000000000000000000" + } + } + ], + "ethCalls": [ + { + "address": "0x0000000000000000000000000000000000000002", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa000000000000000000000000000000000000"], + "returns": ["3000000000000000000"] + }, + { + "address": "0x0000000000000000000000000000000000000002", + "function": "balanceOf(address)(uint256)", + "params": ["0xbbbb000000000000000000000000000000000000"], + "returns": ["5000000000000000000"] + } + ] + } + ], + "assertions": [ + { + "query": "{ tokens { id name symbol decimals } }", + "expected": { "tokens": [ { "id": "0x0000000000000000000000000000000000000002", "name": "FactoryToken1", "symbol": "FT1", "decimals": 18 } 
] } + }, + { + "query": "{ accounts { id balances { token { symbol } amount } } }", + "expected": { + "accounts": [ + {"id": "0xbbbb000000000000000000000000000000000000", "balances": [{"token": { "symbol": "FT1" }, "amount": "5000000000000000000"}]}, + {"id": "0xaaaa000000000000000000000000000000000000", "balances": [{"token": { "symbol": "FT1" }, "amount": "3000000000000000000"}]} + ] + } + } + ] +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json b/gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json new file mode 100644 index 00000000000..51c5264712b --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json @@ -0,0 +1,65 @@ +{ + "name": "Test events and eth_call mocking", + "blocks": [ + { + "events": [ + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "event": "Transfer(address indexed from, address indexed to, uint256 value)", + "params": { + "from": "0xaaaa000000000000000000000000000000000000", + "to": "0xbbbb000000000000000000000000000000000000", + "value": "1000000000000000000" + } + } + ], + "ethCalls": [ + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "symbol()(string)", + "params": [], + "returns": ["GRT"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "name()(string)", + "params": [], + "returns": ["TheGraph"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "decimals()(uint8)", + "params": [], + "returns": ["18"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "balanceOf(address)(uint256)", + "params": ["0xaaaa000000000000000000000000000000000000"], + "returns": ["3000000000000000000"] + }, + { + "address": "0x731a10897d267e19b34503ad902d0a29173ba4b1", + "function": "balanceOf(address)(uint256)", + "params": ["0xbbbb000000000000000000000000000000000000"], + "returns": ["5000000000000000000"] + } + ] + } + ], + "assertions": [ + { + "query": "{ tokens { id name symbol decimals } }", + "expected": { "tokens": [ { "id": "0x731a10897d267e19b34503ad902d0a29173ba4b1", "name": "TheGraph", "symbol": "GRT", "decimals": 18 } ] } + }, + { + "query": "{ accounts { id balances { token { symbol } amount } } }", + "expected": { + "accounts": [ + {"id": "0xbbbb000000000000000000000000000000000000", "balances": [{"token": { "symbol": "GRT" }, "amount": "5000000000000000000"}]}, + {"id": "0xaaaa000000000000000000000000000000000000", "balances": [{"token": { "symbol": "GRT" }, "amount": "3000000000000000000"}]} + ] + } + } + ] +} diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs new file mode 100644 index 00000000000..0aebde3793b --- /dev/null +++ b/gnd/tests/gnd_test.rs @@ -0,0 +1,217 @@ +//! Integration tests for `gnd test` — the mock-based subgraph test runner. +//! +//! These tests verify that `gnd test` can: +//! - Build and run fixture subgraph tests end-to-end +//! - Execute individual test files +//! - Report correct pass/fail counts +//! +//! The fixture subgraph at `tests/fixtures/gnd_test/subgraph/` covers: +//! - Event handling with eth_call mocking (transfer.json) +//! - Block handlers with various filters (blocks.json) +//! - Dynamic data source templates (templates.json) +//! +//! # Prerequisites +//! +//! - Build the gnd binary: `cargo build -p gnd` +//! - AssemblyScript compiler (`asc`) in PATH +//! - npm available for dependency installation +//! +//! # Running +//! +//! ```bash +//! just test-gnd-test +//! ``` +//! +//! 
Tests run with `--test-threads=1` to avoid races when sharing a Postgres +//! instance via `--postgres-url` (CI). With pgtemp (default) each test gets +//! its own isolated database, but serial execution keeps things simple. + +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use tempfile::TempDir; +use walkdir::WalkDir; + +/// Copy the fixture subgraph into a fresh temp directory, install npm +/// dependencies, and run `gnd codegen`. Returns the temp dir handle (to +/// keep it alive) and the path to the prepared subgraph directory. +fn setup_fixture() -> (TempDir, PathBuf) { + let temp_dir = TempDir::new().expect("Failed to create temp directory"); + let subgraph_dir = temp_dir.path().join("subgraph"); + fs::create_dir_all(&subgraph_dir).unwrap(); + + let fixture = fixture_path(); + assert!( + fixture.exists(), + "Fixture not found at {}", + fixture.display() + ); + + copy_dir_recursive(&fixture, &subgraph_dir).expect("Failed to copy fixture to temp directory"); + + // Install npm dependencies (graph-ts, graph-cli) + let npm_output = Command::new("npm") + .arg("install") + .current_dir(&subgraph_dir) + .output() + .expect("Failed to run `npm install`. Is npm available?"); + + assert!( + npm_output.status.success(), + "npm install failed in fixture:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&npm_output.stdout), + String::from_utf8_lossy(&npm_output.stderr), + ); + + verify_asc_available(&subgraph_dir); + + let gnd = verify_gnd_binary(); + let codegen_output = Command::new(&gnd) + .args(["codegen", "--skip-migrations"]) + .current_dir(&subgraph_dir) + .output() + .expect("Failed to run `gnd codegen`"); + + assert!( + codegen_output.status.success(), + "gnd codegen failed in fixture:\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&codegen_output.stdout), + String::from_utf8_lossy(&codegen_output.stderr), + ); + + (temp_dir, subgraph_dir) +} + +/// Get the path to the gnd binary. +fn gnd_binary_path() -> PathBuf { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + PathBuf::from(manifest_dir) + .parent() + .unwrap() + .join("target") + .join("debug") + .join("gnd") +} + +/// Verify the gnd binary exists, panic with a helpful message if not. +fn verify_gnd_binary() -> PathBuf { + let gnd_path = gnd_binary_path(); + assert!( + gnd_path.exists(), + "gnd binary not found at {}. Run `cargo build -p gnd` first.", + gnd_path.display() + ); + gnd_path +} + +/// Get the path to the gnd_test fixture subgraph. +fn fixture_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("gnd_test") + .join("subgraph") +} + +/// Assert that `asc` (AssemblyScript compiler) is available in PATH or in local node_modules. +fn verify_asc_available(subgraph_dir: &Path) { + // Check global PATH first + if Command::new("asc") + .arg("--version") + .output() + .map(|o| o.status.success()) + .unwrap_or(false) + { + return; + } + + // Fall back to local node_modules/.bin/asc + let local_asc = subgraph_dir.join("node_modules").join(".bin").join("asc"); + assert!( + local_asc.exists(), + "asc compiler not found globally or at {}. \ + Install it with: npm install -g assemblyscript@0.19.23", + local_asc.display() + ); +} + +/// Copy a directory recursively. 
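+///
+/// setup_fixture relies on this to give every test its own throwaway copy of
+/// the fixture subgraph, so npm install and codegen artifacts never touch the
+/// checked-in files.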
+fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { + for entry in WalkDir::new(src).min_depth(1) { + let entry = entry?; + let relative_path = entry.path().strip_prefix(src).unwrap(); + let dest_path = dst.join(relative_path); + + if entry.file_type().is_dir() { + fs::create_dir_all(&dest_path)?; + } else { + fs::copy(entry.path(), &dest_path)?; + } + } + Ok(()) +} + +/// Run `gnd test` with the given args in the given directory. +/// Returns the Output (status, stdout, stderr). +fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { + let gnd = verify_gnd_binary(); + let mut cmd = Command::new(&gnd); + cmd.arg("test"); + + // When a database URL is provided via env var (e.g. in CI), pass it through + // to skip pgtemp which may not be available. + if let Ok(db_url) = std::env::var("THEGRAPH_STORE_POSTGRES_DIESEL_URL") { + cmd.arg("--postgres-url").arg(db_url); + } + + cmd.args(args) + .current_dir(cwd) + .output() + .expect("Failed to execute gnd test") +} + +// ============================================================================ +// gnd test — run all fixture tests +// ============================================================================ + +#[test] +fn test_gnd_test_all() { + let (_temp_dir, subgraph_dir) = setup_fixture(); + + // Run only the passing test files (exclude failing.json which is used by the negative test). + let output = run_gnd_test( + &[ + "tests/transfer.json", + "tests/blocks.json", + "tests/templates.json", + ], + &subgraph_dir, + ); + + assert!( + output.status.success(), + "gnd test failed with exit code: {:?}\nstdout: {}\nstderr: {}", + output.status.code(), + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} + +// ============================================================================ +// gnd test — verify failure on wrong assertions +// ============================================================================ + +#[test] +fn test_gnd_test_failing_assertions() { + let (_temp_dir, subgraph_dir) = setup_fixture(); + + let output = run_gnd_test(&["tests/failing.json"], &subgraph_dir); + + assert!( + !output.status.success(), + "gnd test should have failed for failing.json but exited with code 0\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} diff --git a/graph/src/data_source/subgraph.rs b/graph/src/data_source/subgraph.rs index 0207aee4df3..454f20ec29e 100644 --- a/graph/src/data_source/subgraph.rs +++ b/graph/src/data_source/subgraph.rs @@ -237,6 +237,10 @@ impl UnresolvedSource { pub fn address(&self) -> &DeploymentHash { &self.address } + + pub fn start_block(&self) -> BlockNumber { + self.start_block + } } #[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize)] diff --git a/justfile b/justfile index b24b87f5138..a05ef141078 100644 --- a/justfile +++ b/justfile @@ -88,6 +88,18 @@ test-gnd-commands *EXTRA_FLAGS: cargo test {{EXTRA_FLAGS}} --package gnd --test cli_commands -- --nocapture +# Run gnd test runner tests (requires asc in PATH, uses pgtemp for PostgreSQL) +test-gnd-test *EXTRA_FLAGS: + #!/usr/bin/env bash + set -e # Exit on error + + # Build gnd binary + cargo build --bin gnd + + echo "Running gnd test runner tests" + + cargo test {{EXTRA_FLAGS}} --package gnd --test gnd_test -- --nocapture + # Clean workspace (cargo clean) clean: cargo clean
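+
+# Example (placeholder URL): run the gnd test runner against an existing Postgres
+# instead of pgtemp; gnd_test.rs forwards this env var via --postgres-url:
+#   THEGRAPH_STORE_POSTGRES_DIESEL_URL=postgresql://graph:graph@localhost:5432/graph_test just test-gnd-test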