From 6b16f6fad19be03f8ee6c6dedbac220aa3af484d Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 23 Jan 2026 09:41:09 -0800 Subject: [PATCH 01/60] tests: Use foundry v1.4.0 in docker-compose That makes it use the same as what CI uses --- tests/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml index 19dfa1f86c5..c05228f8418 100644 --- a/tests/docker-compose.yml +++ b/tests/docker-compose.yml @@ -22,7 +22,7 @@ services: anvil: # Pinned to specific version since newer versions do not produce # deterministic block hashes. Unpin once that's fixed upstream - image: ghcr.io/foundry-rs/foundry:v1.2.3 + image: ghcr.io/foundry-rs/foundry:v1.4.0 ports: - '3021:8545' command: "'anvil --host 0.0.0.0 --gas-limit 100000000000 --base-fee 1 --block-time 2 --timestamp 1743944919 --mnemonic \"test test test test test test test test test test test junk\"'" From 68b0116428e90125ce080b4a9191f50ec2d4ddbb Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 23 Jan 2026 09:41:43 -0800 Subject: [PATCH 02/60] tests: Change grafted test to not use hardcoded POIs We want to make the test independent of the IPFS hash of the subgraphs involved. Instead of using hardcoded POIs, we calculate them during the test --- pnpm-lock.yaml | 52 ---- tests/integration-tests/grafted/subgraph.yaml | 4 +- tests/src/subgraph.rs | 37 ++- tests/tests/integration_tests.rs | 267 ++++++++++++++---- 4 files changed, 255 insertions(+), 105 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9276137fd13..4fab0a09157 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -308,12 +308,6 @@ importers: specifier: 0.31.0 version: 0.31.0 - tests/runner-tests/substreams: - devDependencies: - '@graphprotocol/graph-cli': - specifier: 0.61.0 - version: 0.61.0(@types/node@24.3.0)(bufferutil@4.0.9)(encoding@0.1.13)(node-fetch@2.7.0(encoding@0.1.13))(typescript@5.9.2)(utf-8-validate@5.0.10) - tests/runner-tests/typename: devDependencies: '@graphprotocol/graph-cli': @@ -419,11 +413,6 @@ packages: engines: {node: '>=14'} hasBin: true - '@graphprotocol/graph-cli@0.61.0': - resolution: {integrity: sha512-gc3+DioZ/K40sQCt6DsNvbqfPTc9ZysuSz3I9MJ++bD6SftaSSweWwfpPysDMzDuxvUAhLAsJ6QjBACPngT2Kw==} - engines: {node: '>=14'} - hasBin: true - '@graphprotocol/graph-cli@0.69.0': resolution: {integrity: sha512-DoneR0TRkZYumsygdi/RST+OB55TgwmhziredI21lYzfj0QNXGEHZOagTOKeFKDFEpP3KR6BAq6rQIrkprJ1IQ==} engines: {node: '>=18'} @@ -3450,47 +3439,6 @@ snapshots: - typescript - utf-8-validate - '@graphprotocol/graph-cli@0.61.0(@types/node@24.3.0)(bufferutil@4.0.9)(encoding@0.1.13)(node-fetch@2.7.0(encoding@0.1.13))(typescript@5.9.2)(utf-8-validate@5.0.10)': - dependencies: - '@float-capital/float-subgraph-uncrashable': 0.0.0-internal-testing.5 - '@oclif/core': 2.8.6(@types/node@24.3.0)(typescript@5.9.2) - '@oclif/plugin-autocomplete': 2.3.10(@types/node@24.3.0)(typescript@5.9.2) - '@oclif/plugin-not-found': 2.4.3(@types/node@24.3.0)(typescript@5.9.2) - '@whatwg-node/fetch': 0.8.8 - assemblyscript: 0.19.23 - binary-install-raw: 0.0.13(debug@4.3.4) - chalk: 3.0.0 - chokidar: 3.5.3 - debug: 4.3.4(supports-color@8.1.1) - docker-compose: 0.23.19 - dockerode: 2.5.8 - fs-extra: 9.1.0 - glob: 9.3.5 - gluegun: 5.1.2(debug@4.3.4) - graphql: 15.5.0 - immutable: 4.2.1 - ipfs-http-client: 55.0.0(encoding@0.1.13)(node-fetch@2.7.0(encoding@0.1.13)) - jayson: 4.0.0(bufferutil@4.0.9)(utf-8-validate@5.0.10) - js-yaml: 3.14.1 - prettier: 1.19.1 - request: 2.88.2 - semver: 7.4.0 - 
sync-request: 6.1.0 - tmp-promise: 3.0.3 - web3-eth-abi: 1.7.0 - which: 2.0.2 - yaml: 1.10.2 - transitivePeerDependencies: - - '@swc/core' - - '@swc/wasm' - - '@types/node' - - bufferutil - - encoding - - node-fetch - - supports-color - - typescript - - utf-8-validate - '@graphprotocol/graph-cli@0.69.0(@types/node@24.3.0)(bufferutil@4.0.9)(encoding@0.1.13)(node-fetch@2.7.0(encoding@0.1.13))(typescript@5.9.2)(utf-8-validate@5.0.10)': dependencies: '@float-capital/float-subgraph-uncrashable': 0.0.0-internal-testing.5 diff --git a/tests/integration-tests/grafted/subgraph.yaml b/tests/integration-tests/grafted/subgraph.yaml index c0435df9c11..6cd33f93006 100644 --- a/tests/integration-tests/grafted/subgraph.yaml +++ b/tests/integration-tests/grafted/subgraph.yaml @@ -26,5 +26,5 @@ dataSources: features: - grafting graft: - base: QmTQbJ234d2Po7xKZS5wKPiYuMYsCAqqY4df5czESjEXn4 - block: 2 \ No newline at end of file + base: '@base@' + block: 2 diff --git a/tests/src/subgraph.rs b/tests/src/subgraph.rs index e1057fccdcb..67513f49ca0 100644 --- a/tests/src/subgraph.rs +++ b/tests/src/subgraph.rs @@ -47,16 +47,41 @@ impl Subgraph { Ok(()) } + /// Patch source subgraph placeholders in the manifest with their deployment hashes. + /// This must be called after `patch()` since it reads from `subgraph.yaml.patched`. + pub fn patch_sources(dir: &TestFile, sources: &[(String, String)]) -> anyhow::Result<()> { + if sources.is_empty() { + return Ok(()); + } + + let patched_path = dir.path.join("subgraph.yaml.patched"); + let mut content = fs::read_to_string(&patched_path)?; + + for (placeholder, deployment_hash) in sources { + let repl = format!("@{}@", placeholder); + content = content.replace(&repl, deployment_hash); + } + + fs::write(&patched_path, content)?; + Ok(()) + } + /// Prepare the subgraph for deployment by patching contracts and checking for subgraph datasources pub async fn prepare( name: &str, contracts: &[Contract], + sources: Option<&[(String, String)]>, ) -> anyhow::Result<(TestFile, String, bool)> { let dir = Self::dir(name); let name = format!("test/{name}"); Self::patch(&dir, contracts).await?; + // Patch source subgraph placeholders if provided + if let Some(sources) = sources { + Self::patch_sources(&dir, sources)?; + } + // Check if subgraph has subgraph datasources let yaml_content = fs::read_to_string(dir.path.join("subgraph.yaml.patched"))?; let yaml: serde_yaml::Value = serde_yaml::from_str(&yaml_content)?; @@ -68,9 +93,15 @@ impl Subgraph { Ok((dir, name, has_subgraph_datasource)) } - /// Deploy the subgraph by running the required `graph` commands - pub async fn deploy(name: &str, contracts: &[Contract]) -> anyhow::Result { - let (dir, name, has_subgraph_datasource) = Self::prepare(name, contracts).await?; + /// Deploy the subgraph by running the required `graph` commands. + /// If `sources` is provided, the deployment hashes will be used to patch + /// source subgraph placeholders (e.g., `@source-subgraph@`) in the manifest. + pub async fn deploy( + name: &str, + contracts: &[Contract], + sources: Option<&[(String, String)]>, + ) -> anyhow::Result { + let (dir, name, has_subgraph_datasource) = Self::prepare(name, contracts, sources).await?; // graph codegen subgraph.yaml let mut prog = Command::new(&CONFIG.graph_cli); diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs index b5c7d3405ca..e47f043bd67 100644 --- a/tests/tests/integration_tests.rs +++ b/tests/tests/integration_tests.rs @@ -9,14 +9,23 @@ //! 
tasks are really worth parallelizing, and applying this trick //! indiscriminately will only result in messy code and diminishing returns. +use std::collections::HashMap; use std::future::Future; use std::pin::Pin; use std::time::{self, Duration, Instant}; use anyhow::{anyhow, bail, Context, Result}; +use graph::components::subgraph::{ + ProofOfIndexing, ProofOfIndexingEvent, ProofOfIndexingFinisher, ProofOfIndexingVersion, +}; +use graph::data::store::Id; +use graph::entity; use graph::futures03::StreamExt; use graph::itertools::Itertools; use graph::prelude::serde_json::{json, Value}; +use graph::prelude::{alloy::primitives::Address, hex, BlockPtr, DeploymentHash}; +use graph::schema::InputSchema; +use graph::slog::{o, Discard, Logger}; use graph_tests::contract::Contract; use graph_tests::subgraph::Subgraph; use graph_tests::{error, status, CONFIG}; @@ -172,7 +181,7 @@ impl TestCase { contracts: &[Contract], ) -> Result { status!(&self.name, "Deploying subgraph"); - let subgraph_name = match Subgraph::deploy(subgraph_name, contracts).await { + let subgraph_name = match Subgraph::deploy(subgraph_name, contracts, None).await { Ok(name) => name, Err(e) => { error!(&self.name, "Deploy failed"); @@ -199,22 +208,28 @@ impl TestCase { } pub async fn prepare(&self, contracts: &[Contract]) -> anyhow::Result { - // If a subgraph has subgraph datasources, prepare them first - if let Some(_subgraphs) = &self.source_subgraph { - if let Err(e) = self.prepare_multiple_sources(contracts).await { - error!(&self.name, "source subgraph deployment failed: {:?}", e); - return Err(e); + // If a subgraph has subgraph datasources, prepare them first and collect their deployment hashes + let source_mappings = if let Some(_subgraphs) = &self.source_subgraph { + match self.prepare_multiple_sources(contracts).await { + Ok(mappings) => Some(mappings), + Err(e) => { + error!(&self.name, "source subgraph deployment failed: {:?}", e); + return Err(e); + } } - } + } else { + None + }; status!(&self.name, "Preparing subgraph"); - let (_, subgraph_name, _) = match Subgraph::prepare(&self.name, contracts).await { - Ok(name) => name, - Err(e) => { - error!(&self.name, "Prepare failed: {:?}", e); - return Err(e); - } - }; + let (_, subgraph_name, _) = + match Subgraph::prepare(&self.name, contracts, source_mappings.as_deref()).await { + Ok(name) => name, + Err(e) => { + error!(&self.name, "Prepare failed: {:?}", e); + return Err(e); + } + }; Ok(subgraph_name) } @@ -276,45 +291,64 @@ impl TestCase { } } - async fn run(self, contracts: &[Contract]) -> TestResult { - // If a subgraph has subgraph datasources, deploy them first - if let Some(_subgraphs) = &self.source_subgraph { - if let Err(e) = self.deploy_multiple_sources(contracts).await { - error!(&self.name, "source subgraph deployment failed"); - return TestResult { - name: self.name.clone(), - subgraph: None, - status: TestStatus::Err(e), - }; + pub async fn run(self, contracts: &[Contract]) -> TestResult { + // If a subgraph has subgraph datasources, deploy them first and collect their deployment hashes + let source_mappings = if let Some(_subgraphs) = &self.source_subgraph { + match self.deploy_multiple_sources(contracts).await { + Ok(mappings) => Some(mappings), + Err(e) => { + error!(&self.name, "source subgraph deployment failed"); + return TestResult { + name: self.name.clone(), + subgraph: None, + status: TestStatus::Err(e), + }; + } } - } + } else { + None + }; status!(&self.name, "Deploying subgraph"); - let subgraph_name = match Subgraph::deploy(&self.name, 
contracts).await { - Ok(name) => name, - Err(e) => { - error!(&self.name, "Deploy failed"); - return TestResult { - name: self.name.clone(), - subgraph: None, - status: TestStatus::Err(e.context("Deploy failed")), - }; - } - }; + let subgraph_name = + match Subgraph::deploy(&self.name, contracts, source_mappings.as_deref()).await { + Ok(name) => name, + Err(e) => { + error!(&self.name, "Deploy failed"); + return TestResult { + name: self.name.clone(), + subgraph: None, + status: TestStatus::Err(e.context("Deploy failed")), + }; + } + }; self.check_health_and_test(contracts, subgraph_name).await } - async fn prepare_multiple_sources(&self, contracts: &[Contract]) -> Result<()> { + async fn prepare_multiple_sources( + &self, + contracts: &[Contract], + ) -> Result> { + let mut mappings = Vec::new(); if let Some(sources) = &self.source_subgraph { for source in sources { - let _ = Subgraph::prepare(source.test_name(), contracts).await?; + // Source subgraphs don't have their own sources, so pass None + let _ = Subgraph::prepare(source.test_name(), contracts, None).await?; + // If the source has an alias (pre-known IPFS hash), use it for the mapping + if let Some(alias) = source.alias() { + mappings.push((source.test_name().to_string(), alias.to_string())); + } } } - Ok(()) + Ok(mappings) } - async fn deploy_multiple_sources(&self, contracts: &[Contract]) -> Result<()> { + async fn deploy_multiple_sources( + &self, + contracts: &[Contract], + ) -> Result> { + let mut mappings = Vec::new(); if let Some(sources) = &self.source_subgraph { for source in sources { let subgraph = self.deploy_and_wait(source.test_name(), contracts).await?; @@ -323,9 +357,11 @@ impl TestCase { "Source subgraph deployed with hash {}", subgraph.deployment ); + // Use the test_name as the placeholder key + mappings.push((source.test_name().to_string(), subgraph.deployment.clone())); } } - Ok(()) + Ok(mappings) } } @@ -892,21 +928,22 @@ async fn test_subgraph_grafting(ctx: TestContext) -> anyhow::Result<()> { assert!(subgraph.healthy); + // Fixed block hashes from deterministic Anvil config let block_hashes: Vec<&str> = vec![ "e26fccbd24dcc76074b432becf29cad3bcba11a8467a7b770fad109c2b5d14c2", "249dbcbee975c22f8c9cc937536945ca463568c42d8933a3f54129dec352e46b", "408675f81c409dede08d0eeb2b3420a73b067c4fa8c5f0fc49ce369289467c33", ]; - let pois: Vec<&str> = vec![ - "0x606c1ed77564ef9ab077e0438da9f3c6af79a991603aecf74650971a88d05b65", - "0xbb21d5cf5fd62892159f95211da4a02f0dfa1b43d68aeb64baa52cc67fbb6c8e", - "0x5a01b371017c924e8cedd62a76cf8dcf05987f80d2b91aaf3fb57872ab75887f", - ]; + // The deployment hash is dynamic (depends on the base subgraph's hash) + let deployment_hash = DeploymentHash::new(&subgraph.deployment).unwrap(); + + // Compute the expected POI values dynamically + let expected_pois = compute_expected_pois(&deployment_hash, &block_hashes); for i in 1..4 { let block_hash = get_block_hash(i).await.unwrap(); - // We need to make sure that the preconditions for POI are fulfiled + // We need to make sure that the preconditions for POI are fulfilled // namely that the blockchain produced the proper block hashes for the // blocks of which we will check the POI. assert_eq!(block_hash, block_hashes[(i - 1) as usize]); @@ -938,12 +975,146 @@ async fn test_subgraph_grafting(ctx: TestContext) -> anyhow::Result<()> { // Change on the block #2 would mean a change in the transitioning // from the old to the new algorithm hence would be reflected only // subgraphs that are grafting from pre 0.0.5 to 0.0.6 or newer. 
- assert_eq!(poi, pois[(i - 1) as usize]); + assert_eq!( + poi, + expected_pois[(i - 1) as usize], + "POI mismatch for block {}", + i + ); } Ok(()) } +/// Compute the expected POI values for the grafted subgraph. +/// +/// The grafted subgraph: +/// - Spec version 0.0.6 (uses Fast POI algorithm) +/// - Grafts from base subgraph at block 2 +/// - Creates GraftedData entities starting from block 3 +/// +/// The base subgraph: +/// - Spec version 0.0.5 (uses Legacy POI algorithm) +/// - Creates BaseData entities for each block +/// +/// POI algorithm transition: +/// - Blocks 0-2: Legacy POI digests (from base subgraph) +/// - Block 3+: Fast POI algorithm with transition from Legacy +fn compute_expected_pois(deployment_hash: &DeploymentHash, block_hashes: &[&str]) -> Vec { + let logger = Logger::root(Discard, o!()); + let causality_region = "ethereum/test"; + + // Create schemas for the entity types + let base_schema = InputSchema::parse_latest( + "type BaseData @entity(immutable: true) { id: ID!, data: String!, blockNumber: BigInt! }", + deployment_hash.clone(), + ) + .unwrap(); + + let grafted_schema = InputSchema::parse_latest( + "type GraftedData @entity(immutable: true) { id: ID!, data: String!, blockNumber: BigInt! }", + deployment_hash.clone(), + ) + .unwrap(); + + // Compute POI digests at each block checkpoint + // Store the accumulated state after each block so we can compute POI at any block + let mut db_at_block: HashMap>> = HashMap::new(); + let mut db: HashMap> = HashMap::new(); + + // Process blocks 0-3: + // - Blocks 0-2: Legacy POI (from base subgraph, creates BaseData entities) + // - Block 3: Fast POI (grafted subgraph starts here, creates GraftedData entity) + // + // The base subgraph starts from block 0 (genesis block triggers handlers in Anvil). + // + // The grafted subgraph: + // - spec version 0.0.6 → uses Fast POI algorithm + // - grafts from base subgraph at block 2 + // - inherits POI digests from base for blocks 0-2 + // - transitions from Legacy to Fast at block 3 + for block_i in 0..=3i32 { + let version = if block_i <= 2 { + ProofOfIndexingVersion::Legacy + } else { + ProofOfIndexingVersion::Fast + }; + + let mut stream = ProofOfIndexing::new(block_i, version); + + if block_i <= 2 { + // Base subgraph creates BaseData + let id_str = block_i.to_string(); + let entity = entity! { + base_schema => + id: &id_str, + data: "from base", + blockNumber: graph::prelude::Value::BigInt(block_i.into()), + }; + + let event = ProofOfIndexingEvent::SetEntity { + entity_type: "BaseData", + id: &id_str, + data: &entity, + }; + stream.write(&logger, causality_region, &event); + } else { + // Grafted subgraph creates GraftedData + let id_str = block_i.to_string(); + let entity = entity! 
{ + grafted_schema => + id: &id_str, + data: "to grafted", + blockNumber: graph::prelude::Value::BigInt(block_i.into()), + }; + + let event = ProofOfIndexingEvent::SetEntity { + entity_type: "GraftedData", + id: &id_str, + data: &entity, + }; + stream.write(&logger, causality_region, &event); + } + + for (name, region) in stream.take() { + let prev = db.get(&name); + let update = region.pause(prev.map(|v| &v[..])); + db.insert(name, update); + } + + db_at_block.insert(block_i, db.clone()); + } + + // Compute POI for blocks 1, 2, 3 + let mut pois = Vec::new(); + for (block_idx, block_hash_hex) in block_hashes.iter().enumerate() { + let block_number = (block_idx + 1) as i32; + + // Get the POI version for this block - grafted subgraph uses Fast (spec 0.0.6) + let version = ProofOfIndexingVersion::Fast; + + let block_hash_bytes = hex::decode(block_hash_hex).unwrap(); + let block_ptr = BlockPtr::from((block_hash_bytes, block_number as u64)); + + // Use zero address to match the test query's indexer parameter + let indexer = Some(Address::ZERO); + let mut finisher = + ProofOfIndexingFinisher::new(&block_ptr, deployment_hash, &indexer, version); + + if let Some(db_state) = db_at_block.get(&block_number) { + for (name, region) in db_state.iter() { + finisher.add_causality_region(name, region); + } + } + + let poi_bytes = finisher.finish(); + let poi = format!("0x{}", hex::encode(poi_bytes)); + pois.push(poi); + } + + pois +} + async fn test_poi_for_failed_subgraph(ctx: TestContext) -> anyhow::Result<()> { let subgraph = ctx.subgraph; const INDEXING_STATUS: &str = r#" From 7154bfa23b7fc9b73b722ab43dbed977f164c5a3 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Fri, 23 Jan 2026 10:13:28 -0800 Subject: [PATCH 03/60] tests: Remove staleness check for compiled contracts Git doesn't preserve mtimes, and this check just checks whether the source or the compiled contract was checked out first. --- tests/src/contract.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/src/contract.rs b/tests/src/contract.rs index 80a9ba57031..5ef59bc7eed 100644 --- a/tests/src/contract.rs +++ b/tests/src/contract.rs @@ -75,14 +75,7 @@ impl Contract { } fn code_and_abi(name: &str) -> anyhow::Result<(String, Vec)> { - let src = TestFile::new(&format!("contracts/src/{}.sol", name)); let bin = TestFile::new(&format!("contracts/out/{}.sol/{}.json", name, name)); - if src.newer(&bin) { - println!( - "The source {} is newer than the compiled contract {}. Please recompile.", - src, bin - ); - } let json: Value = serde_json::from_reader(bin.reader()?).unwrap(); let abi = serde_json::to_string(&json["abi"]).unwrap(); From 49f8d2acf43557f275b7cbd40a6de644ecebf037 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:12:24 -0800 Subject: [PATCH 04/60] Spec for reimplementing TS graph-cli in Rust --- docs/specs/gnd-cli-expansion.md | 717 ++++++++++++++++++++++++++++++++ 1 file changed, 717 insertions(+) create mode 100644 docs/specs/gnd-cli-expansion.md diff --git a/docs/specs/gnd-cli-expansion.md b/docs/specs/gnd-cli-expansion.md new file mode 100644 index 00000000000..a9270a7c845 --- /dev/null +++ b/docs/specs/gnd-cli-expansion.md @@ -0,0 +1,717 @@ +# Spec: gnd CLI Expansion + +This spec describes the expansion of `gnd` (Graph Node Dev) to include functionality currently provided by the TypeScript-based `graph-cli`. The goal is a drop-in replacement for subgraph development workflows, implemented in Rust and integrated with graph-node's existing infrastructure. 
+ +## Goals + +- **Drop-in replacement**: Same commands, flags, output format, and exit codes as `graph-cli` +- **Identical code generation**: AssemblyScript output must be byte-for-byte identical after formatting +- **Ethereum only**: Initial scope limited to Ethereum protocol +- **Reuse graph-node internals**: Leverage existing manifest parsing, validation, IPFS client, etc. + +## Non-Goals + +- Multi-protocol support (NEAR, Cosmos, Arweave, Substreams, Subgraph) - future work +- Rollout/migration plan from TS CLI +- Performance optimization beyond reasonable behavior + +## Commands + +### Command Matrix + +| Command | TS CLI | gnd | Notes | +|---------|--------|-----|-------| +| `codegen` | Yes | Yes | Generate AssemblyScript types | +| `build` | Yes | Yes | Compile to WASM | +| `deploy` | Yes | Yes | Deploy to Graph Node | +| `init` | Yes | Yes | Scaffold new subgraph | +| `add` | Yes | Yes | Add datasource to existing subgraph | +| `remove` | Yes | Yes | Unregister subgraph name | +| `create` | Yes | Yes | Register subgraph name | +| `auth` | Yes | Yes | Set deploy key | +| `publish` | Yes | Yes | Publish to decentralized network | +| `test` | Yes | Yes | Run Matchstick tests | +| `clean` | Yes | Yes | Remove build artifacts | +| `dev` | No | Yes | Run graph-node in dev mode (existing gnd) | +| `local` | Yes | No | Skipped - use existing test infrastructure | +| `node` | Yes | No | Skipped - use graphman for node management | + +### Known Differences from TS CLI + +1. **`local` command**: Not implemented. Users should use existing integration test infrastructure. +2. **`node` subcommand**: Not implemented. Use `graphman` for node management operations. +3. **`--uncrashable` flag on codegen**: Not implemented. Float Capital's uncrashable helper generation is a niche third-party feature. +4. **Debug output**: Uses `RUST_LOG` environment variable instead of `DEBUG=graph-cli:*`. + +## CLI Interface + +### Binary and Invocation + +``` +gnd [options] [arguments] +``` + +The binary name is `gnd`. All graph-cli commands become gnd subcommands. + +### Version Output + +``` +$ gnd --version +gnd 0.1.0 (graph-cli compatible: 0.98.1) +``` + +Shows both gnd version and the graph-cli version it emulates. + +### Flag Compatibility + +All flags must match the TS CLI exactly: +- Same long names (`--output-dir`) +- Same short names (`-o`) +- Same defaults +- Same validation behavior + +Reference: Each command section below lists flags with references to TS CLI source. + +### Output Format + +Output must match TS CLI format exactly, including: +- Spinner/progress indicators +- Success checkmarks (`✔`) +- Step descriptions +- File paths displayed +- Error formatting (information must match; exact wording may differ) + +Reference: `/packages/cli/src/command-helpers/spinner.ts` + +### Exit Codes + +Exit codes must match TS CLI behavior: +- `0`: Success +- `1`: Error (validation, compilation, deployment failure, etc.) + +### Configuration Files + +Use same paths and formats as TS CLI: + +| File | Path | Purpose | +|------|------|---------| +| Auth tokens | `~/.graphprotocol/` | Deploy keys and access tokens | +| Network config | `networks.json` (project root) | Network-specific addresses | + +Reference: `/packages/cli/src/command-helpers/auth.ts` + +## Command Specifications + +### `gnd codegen` + +Generates AssemblyScript types from subgraph manifest. 
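As a rough sketch of how the flag surface below can be kept identical to the TS CLI, the codegen options might be expressed as a `clap` derive struct along these lines. The struct and field names are illustrative assumptions, not the final implementation:

```rust
use clap::Parser;

/// Sketch only: mirrors the codegen flags and defaults listed in the table below.
#[derive(Debug, Parser)]
#[clap(about = "Generate AssemblyScript types from a subgraph manifest")]
pub struct CodegenOpt {
    /// Path to the subgraph manifest
    #[clap(default_value = "subgraph.yaml")]
    pub manifest: String,

    /// Output directory for generated types
    #[clap(long, short = 'o', default_value = "generated/")]
    pub output_dir: String,

    /// Skip subgraph migrations
    #[clap(long)]
    pub skip_migrations: bool,

    /// Regenerate types on file changes
    #[clap(long, short = 'w')]
    pub watch: bool,

    /// IPFS node URL
    #[clap(long, short = 'i', default_value = "https://api.thegraph.com/ipfs/")]
    pub ipfs: String,
}
```

Keeping the defaults in the derive attributes keeps `gnd codegen --help` output close to the TS CLI's.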
+ +**Usage:** +``` +gnd codegen [subgraph-manifest] +``` + +**Arguments:** +- `subgraph-manifest`: Path to manifest file (default: `subgraph.yaml`) + +**Flags:** +| Flag | Short | Default | Description | +|------|-------|---------|-------------| +| `--output-dir` | `-o` | `generated/` | Output directory for generated types | +| `--skip-migrations` | | `false` | Skip subgraph migrations | +| `--watch` | `-w` | `false` | Regenerate on file changes | +| `--ipfs` | `-i` | `https://api.thegraph.com/ipfs/` | IPFS node URL | +| `--help` | `-h` | | Show help | + +**Behavior:** +1. Load and validate manifest +2. Apply migrations (unless `--skip-migrations`) +3. Assert minimum API version (0.0.5) and graph-ts version (0.25.0) +4. Generate entity classes from GraphQL schema +5. Generate ABI bindings for each contract +6. Generate template datasource bindings +7. Format output with prettier +8. Write to output directory + +**Output Structure:** +``` +generated/ +├── schema.ts # Entity classes +├── / +│ └── .ts # ABI bindings +└── templates/ + └── / + └── .ts # Template ABI bindings +``` + +**TS CLI References:** +- Command: `/packages/cli/src/commands/codegen.ts` +- Type generator: `/packages/cli/src/type-generator.ts` +- Schema codegen: `/packages/cli/src/codegen/schema.ts` +- ABI codegen: `/packages/cli/src/protocols/ethereum/codegen/abi.ts` + +### `gnd build` + +Compiles subgraph to WASM. + +**Usage:** +``` +gnd build [subgraph-manifest] +``` + +**Arguments:** +- `subgraph-manifest`: Path to manifest file (default: `subgraph.yaml`) + +**Flags:** +| Flag | Short | Default | Description | +|------|-------|---------|-------------| +| `--output-dir` | `-o` | `build/` | Output directory | +| `--output-format` | `-t` | `wasm` | Output format: `wasm` or `wast` | +| `--skip-migrations` | | `false` | Skip subgraph migrations | +| `--watch` | `-w` | `false` | Rebuild on file changes | +| `--ipfs` | `-i` | | IPFS node URL (uploads if provided) | +| `--network` | | | Network to use from networks.json | +| `--network-file` | | `networks.json` | Path to networks config | +| `--help` | `-h` | | Show help | + +**Behavior:** +1. Run codegen (unless types already exist) +2. Apply migrations (unless `--skip-migrations`) +3. Resolve network-specific values from networks.json +4. Shell out to `asc` (AssemblyScript compiler) for each mapping +5. Copy ABIs and schema to build directory +6. Generate build manifest +7. Optionally upload to IPFS + +**TS CLI References:** +- Command: `/packages/cli/src/commands/build.ts` +- Compiler: `/packages/cli/src/compiler/index.ts` + +### `gnd deploy` + +Deploys subgraph to a Graph Node. 
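The last step of the deploy flow (see the behavior list below) is a JSON-RPC call to the Graph Node admin endpoint. A minimal sketch, assuming graph-node's admin API exposes `subgraph_deploy` with `name` and `ipfs_hash` parameters and using `reqwest`/`serde_json` from the planned dependencies (authentication headers and retries omitted):

```rust
use anyhow::Result;
use serde_json::json;

/// Sketch only: send a `subgraph_deploy` request to the Graph Node admin endpoint.
async fn send_deploy_request(node_url: &str, name: &str, ipfs_hash: &str) -> Result<()> {
    let body = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "subgraph_deploy",
        "params": { "name": name, "ipfs_hash": ipfs_hash },
    });

    let response: serde_json::Value = reqwest::Client::new()
        .post(node_url)
        .json(&body)
        .send()
        .await?
        .json()
        .await?;

    // The admin API reports failures as a JSON-RPC error object.
    if let Some(err) = response.get("error") {
        anyhow::bail!("deployment failed: {err}");
    }
    Ok(())
}
```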
+ +**Usage:** +``` +gnd deploy [subgraph-name] [subgraph-manifest] +``` + +**Arguments:** +- `subgraph-name`: Name to deploy as (e.g., `user/subgraph`) +- `subgraph-manifest`: Path to manifest file (default: `subgraph.yaml`) + +**Flags:** +| Flag | Short | Default | Description | +|------|-------|---------|-------------| +| `--product` | | | Product: `subgraph-studio` or `hosted-service` | +| `--studio` | | `false` | Shorthand for `--product subgraph-studio` | +| `--node` | `-g` | | Graph Node URL | +| `--ipfs` | `-i` | | IPFS node URL | +| `--access-token` | | | Access token for authentication | +| `--deploy-key` | | | Deploy key (alias for access-token) | +| `--version-label` | `-l` | | Version label | +| `--headers` | | | Additional HTTP headers (JSON) | +| `--debug-fork` | | | Fork subgraph for debugging | +| `--skip-migrations` | | `false` | Skip subgraph migrations | +| `--network` | | | Network from networks.json | +| `--network-file` | | `networks.json` | Path to networks config | +| `--output-dir` | `-o` | `build/` | Build output directory | +| `--help` | `-h` | | Show help | + +**Deploy Targets:** +- Local Graph Node (via `--node` and `--ipfs`) +- Subgraph Studio (`--product subgraph-studio` or `--studio`) +- Hosted Service (`--product hosted-service`) +- Decentralized network (via `publish` command) + +**Behavior:** +1. Build subgraph (runs build command) +2. Upload build artifacts to IPFS +3. Send deployment request to Graph Node via JSON-RPC + +**TS CLI References:** +- Command: `/packages/cli/src/commands/deploy.ts` + +### `gnd init` + +Scaffolds a new subgraph project. + +**Usage:** +``` +gnd init [directory] +``` + +**Arguments:** +- `directory`: Directory to create subgraph in + +**Flags:** +| Flag | Short | Default | Description | +|------|-------|---------|-------------| +| `--protocol` | | | Protocol: `ethereum` | +| `--product` | | | Product for deployment | +| `--studio` | | `false` | Initialize for Subgraph Studio | +| `--from-contract` | | | Contract address to generate from | +| `--from-example` | | | Example subgraph to clone | +| `--contract-name` | | | Name for the contract | +| `--index-events` | | `false` | Index all contract events | +| `--start-block` | | | Start block for indexing | +| `--network` | | `mainnet` | Network name | +| `--abi` | | | Path to ABI file | +| `--spkg` | | | Path to Substreams package | +| `--allow-simple-name` | | `false` | Allow simple subgraph names | +| `--help` | `-h` | | Show help | + +**Behavior:** +1. Prompt for missing information (protocol, network, contract, etc.) +2. Fetch ABI from Etherscan/Sourcify if `--from-contract` and no `--abi` +3. Generate scaffold: + - `subgraph.yaml` manifest + - `schema.graphql` with entities for events + - `src/mapping.ts` with event handlers + - `package.json` with dependencies + - `tsconfig.json` + - ABIs directory +4. Optionally initialize git repository +5. Install dependencies + +**External APIs:** +- Etherscan API: Fetch verified contract ABIs +- Sourcify API: Fetch verified contract ABIs (fallback) +- Network registry: `@pinax/graph-networks-registry` for chain configuration + +**TS CLI References:** +- Command: `/packages/cli/src/commands/init.ts` +- Scaffold: `/packages/cli/src/scaffold/index.ts` +- Schema generation: `/packages/cli/src/scaffold/schema.ts` +- Mapping generation: `/packages/cli/src/scaffold/mapping.ts` +- Etherscan client: `/packages/cli/src/command-helpers/contracts.ts` + +### `gnd add` + +Adds a new datasource to an existing subgraph. + +**Usage:** +``` +gnd add
<address> [subgraph-manifest]
```

**Arguments:**
- `address`: Contract address
- `subgraph-manifest`: Path to manifest file (default: `subgraph.yaml`)

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--abi` | | | Path to ABI file |
| `--contract-name` | | | Name for the contract |
| `--merge-entities` | | `false` | Merge with existing entities |
| `--network-file` | | `networks.json` | Path to networks config |
| `--start-block` | | | Start block |
| `--help` | `-h` | | Show help |

**TS CLI References:**
- Command: `/packages/cli/src/commands/add.ts`

### `gnd create`

Registers a subgraph name with a Graph Node.

**Usage:**
```
gnd create <subgraph-name>
```

**Arguments:**
- `subgraph-name`: Name to register

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--node` | `-g` | | Graph Node URL |
| `--access-token` | | | Access token |
| `--help` | `-h` | | Show help |

**TS CLI References:**
- Command: `/packages/cli/src/commands/create.ts`

### `gnd remove`

Unregisters a subgraph name from a Graph Node.

**Usage:**
```
gnd remove <subgraph-name>
```

**Arguments:**
- `subgraph-name`: Name to unregister

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--node` | `-g` | | Graph Node URL |
| `--access-token` | | | Access token |
| `--help` | `-h` | | Show help |

**TS CLI References:**
- Command: `/packages/cli/src/commands/remove.ts`

### `gnd auth`

Sets the deploy key for a Graph Node.

**Usage:**
```
gnd auth <deploy-key>
```

**Arguments:**
- `deploy-key`: Deploy key to store

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--product` | | | Product: `subgraph-studio` or `hosted-service` |
| `--studio` | | `false` | Shorthand for subgraph-studio |
| `--help` | `-h` | | Show help |

**Behavior:**
Stores deploy key in `~/.graphprotocol/` for later use by deploy/publish commands.

**TS CLI References:**
- Command: `/packages/cli/src/commands/auth.ts`
- Auth helpers: `/packages/cli/src/command-helpers/auth.ts`

### `gnd publish`

Publishes a subgraph to The Graph's decentralized network.

**Usage:**
```
gnd publish [subgraph-manifest]
```

**Arguments:**
- `subgraph-manifest`: Path to manifest file (default: `subgraph.yaml`)

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--subgraph-id` | | | Subgraph ID to publish to |
| `--ipfs` | `-i` | | IPFS node URL |
| `--protocol-network` | | | Protocol network (e.g., `arbitrum-one`) |
| `--help` | `-h` | | Show help |

**TS CLI References:**
- Command: `/packages/cli/src/commands/publish.ts`

### `gnd test`

Runs Matchstick tests for the subgraph.

**Usage:**
```
gnd test [datasource]
```

**Arguments:**
- `datasource`: Specific datasource to test (optional)

**Flags:**
| Flag | Short | Default | Description |
|------|-------|---------|-------------|
| `--coverage` | `-c` | `false` | Run with coverage |
| `--docker` | `-d` | `false` | Run in Docker container |
| `--force` | `-f` | `false` | Force recompilation |
| `--logs` | `-l` | `false` | Show logs |
| `--recompile` | `-r` | `false` | Recompile before testing |
| `--version` | `-v` | | Matchstick version |
| `--help` | `-h` | | Show help |

**Behavior:**
1. Download Matchstick binary (if not present)
2. Shell out to Matchstick with appropriate flags
3.
Report test results + +**TS CLI References:** +- Command: `/packages/cli/src/commands/test.ts` + +### `gnd clean` + +Removes build artifacts and generated files. + +**Usage:** +``` +gnd clean +``` + +**Flags:** +| Flag | Short | Default | Description | +|------|-------|---------|-------------| +| `--codegen-dir` | | `generated/` | Codegen output directory | +| `--build-dir` | | `build/` | Build output directory | +| `--help` | `-h` | | Show help | + +**Behavior:** +Removes `generated/` and `build/` directories (or custom paths if specified). + +**TS CLI References:** +- Command: `/packages/cli/src/commands/clean.ts` + +### `gnd dev` + +Runs graph-node in development mode with file watching. + +This is the existing `gnd` functionality, preserved as a subcommand. The implementation can be adjusted to fit the new subcommand structure. + +**Usage:** +``` +gnd dev [options] +``` + +**Flags:** +Preserve existing gnd flags, adjusted as needed for subcommand structure. + +## Code Generation + +Code generation is the most complex component and must produce byte-for-byte identical output to the TS CLI (after prettier formatting). + +### Generated File Types + +#### 1. Entity Classes (`schema.ts`) + +Generated from GraphQL schema. Each entity type becomes an AssemblyScript class with: +- Constructor +- Static `load(id)` method +- Static `loadInBlock(id)` method +- `save()` method +- Getters/setters for each field +- Proper type mappings (GraphQL → AssemblyScript) + +**TS CLI Reference:** `/packages/cli/src/codegen/schema.ts` + +#### 2. ABI Bindings (`.ts`) + +Generated from contract ABI. Includes: +- Event classes with typed parameters +- Function call result classes +- Contract class with typed call methods +- Proper Ethereum type mappings + +**TS CLI Reference:** `/packages/cli/src/protocols/ethereum/codegen/abi.ts` + +#### 3. Template Bindings + +Generated for template datasources with the same structure as ABI bindings. + +**TS CLI Reference:** `/packages/cli/src/codegen/template.ts` + +### Type Mappings + +#### GraphQL → AssemblyScript + +| GraphQL | AssemblyScript | +|---------|----------------| +| `ID` | `string` | +| `String` | `string` | +| `Int` | `i32` | +| `BigInt` | `BigInt` | +| `BigDecimal` | `BigDecimal` | +| `Bytes` | `Bytes` | +| `Boolean` | `boolean` | +| `[T]` | `Array` | +| Entity reference | `string` (ID) | + +**TS CLI Reference:** `/packages/cli/src/codegen/schema.ts` (look for type mapping functions) + +#### Ethereum ABI → AssemblyScript + +| Solidity | AssemblyScript | +|----------|----------------| +| `address` | `Address` | +| `bool` | `boolean` | +| `bytes` | `Bytes` | +| `bytesN` | `Bytes` | +| `intN` | `BigInt` | +| `uintN` | `BigInt` | +| `string` | `string` | +| `T[]` | `Array` | +| tuple | Generated class | + +**TS CLI Reference:** `/packages/cli/src/protocols/ethereum/codegen/abi.ts` + +### API Version Handling + +Different `apiVersion` values in the manifest affect code generation. gnd must support all versions that the TS CLI supports. + +**TS CLI Reference:** `/packages/cli/src/codegen/` (version-specific logic throughout) + +### Formatting + +All generated code must be formatted with prettier before writing: +- Shell out to `prettier` with same configuration as TS CLI +- Parser: `typescript` + +## Migrations + +Migrations update older manifest formats to newer versions. gnd must implement all migrations that TS CLI supports. + +**Migration Chain:** +``` +0.0.1 → 0.0.2 → 0.0.3 → 0.0.4 → 0.0.5 → ... 
→ current +``` + +**TS CLI Reference:** `/packages/cli/src/migrations/` + +Each migration is a transformation function that: +1. Checks manifest version +2. Applies necessary changes +3. Updates version number + +## External Dependencies + +### Runtime Dependencies (shell out) + +| Tool | Purpose | Required | +|------|---------|----------| +| `asc` | AssemblyScript compiler | For `build` | +| `prettier` | Code formatting | For `codegen` | +| `matchstick` | Test runner | For `test` | + +### Network APIs + +| API | Purpose | +|-----|---------| +| Etherscan | Fetch verified contract ABIs | +| Sourcify | Fetch verified contract ABIs (fallback) | +| `@pinax/graph-networks-registry` | Network configuration (chain IDs, etc.) | + +The network registry should be fetched at runtime to get current network configurations. + +### graph-node Reuse + +| Component | graph-node Location | Purpose | +|-----------|---------------------|---------| +| Manifest parsing | `graph/src/data/subgraph/` | Load subgraph.yaml | +| Manifest validation | `graph/src/data/subgraph/` | Validate manifest structure | +| GraphQL schema | `graph/src/schema/input/` | Parse schema.graphql | +| IPFS client | `graph/src/ipfs/` | Upload to IPFS | +| Link resolver | `graph/src/components/link_resolver/` | Resolve file references | +| File watcher | `gnd/src/watcher.rs` | Watch mode | + +Refactor graph-node components as needed to make them reusable. + +## Module Structure + +``` +gnd/src/ +├── main.rs # Entry point, clap setup +├── lib.rs +├── commands/ +│ ├── mod.rs +│ ├── codegen.rs +│ ├── build.rs +│ ├── deploy.rs +│ ├── init.rs +│ ├── add.rs +│ ├── create.rs +│ ├── remove.rs +│ ├── auth.rs +│ ├── publish.rs +│ ├── test.rs +│ ├── clean.rs +│ └── dev.rs # Existing gnd functionality +├── codegen/ +│ ├── mod.rs +│ ├── schema.rs # Entity class generation +│ ├── abi.rs # ABI binding generation +│ └── template.rs # Template binding generation +├── scaffold/ +│ ├── mod.rs +│ ├── manifest.rs # Generate subgraph.yaml +│ ├── schema.rs # Generate schema.graphql +│ └── mapping.rs # Generate mapping.ts +├── migrations/ +│ ├── mod.rs +│ └── ... # One module per migration +├── compiler/ +│ ├── mod.rs +│ └── asc.rs # Shell out to asc +├── services/ +│ ├── mod.rs +│ ├── etherscan.rs # Etherscan API client +│ ├── sourcify.rs # Sourcify API client +│ ├── registry.rs # Network registry client +│ └── graph_node.rs # Graph Node JSON-RPC client +├── config/ +│ ├── mod.rs +│ ├── auth.rs # ~/.graphprotocol/ management +│ └── networks.rs # networks.json handling +├── output/ +│ ├── mod.rs +│ └── spinner.rs # Progress/spinner output +└── watcher.rs # Existing file watcher +``` + +## Testing + +### Test Strategy + +1. **Unit tests**: Test individual functions (type mappings, migrations, etc.) +2. **Snapshot tests**: Compare generated output against TS CLI output +3. **Integration tests**: End-to-end command execution + +### Test Corpus + +Use the same test corpus as graph-cli. Tests must cover at least everything the TS CLI tests cover. + +**TS CLI Test Location:** `/packages/cli/tests/` + +### Snapshot Testing for Code Generation + +For each test subgraph: +1. Run TS CLI codegen, capture output +2. Run gnd codegen, capture output +3. Assert outputs are identical + +This ensures byte-for-byte compatibility. + +### Edge Cases + +When edge case bugs are discovered in the TS CLI, gnd should fix them rather than replicate them. Document any behavioral differences that result from bug fixes. 
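As a concrete illustration of the snapshot strategy above, a test can run both CLIs against the same fixture and compare the generated output byte-for-byte. The fixture path, output directories, and helper below are assumptions made for this sketch; a real test would walk the entire output tree rather than a single file:

```rust
use std::fs;
use std::path::Path;
use std::process::Command;

/// Run `<cmd> codegen` for the fixture, writing generated types to `out_dir`.
fn codegen(cmd: &str, fixture: &Path, out_dir: &str) {
    let status = Command::new(cmd)
        .args(["codegen", "subgraph.yaml", "--output-dir", out_dir])
        .current_dir(fixture)
        .status()
        .expect("failed to run codegen");
    assert!(status.success(), "{cmd} codegen failed");
}

#[test]
fn codegen_matches_ts_cli() {
    let fixture = Path::new("tests/fixtures/basic-subgraph");

    codegen("graph", fixture, "generated-ts"); // TS CLI output
    codegen("gnd", fixture, "generated-rs"); // gnd output

    // Compare one generated file byte-for-byte as an example.
    let ts = fs::read(fixture.join("generated-ts/schema.ts")).unwrap();
    let rs = fs::read(fixture.join("generated-rs/schema.ts")).unwrap();
    assert_eq!(ts, rs, "generated schema.ts differs from TS CLI output");
}
```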
+ +## Dependencies to Add + +### Cargo.toml additions + +```toml +[dependencies] +# CLI framework (already present) +clap = { version = "...", features = ["derive"] } + +# Interactive prompts (for init) +inquire = "..." + +# HTTP client (for Etherscan, Sourcify, registry) +reqwest = { version = "...", features = ["json"] } + +# Progress/spinner output +indicatif = "..." + +# Template rendering (for scaffold) +minijinja = "..." + +# JSON handling +serde_json = "..." +``` + +## Open Questions + +None at this time. All major decisions have been made. + +## References + +- TS CLI repository: https://github.com/graphprotocol/graph-tooling +- TS CLI source: `/packages/cli/src/` +- Local checkout: `/home/lutter/code/subgraphs/graph-cli` +- Original gnd expansion plan: `/LOCAL/plans/gnd-cli-expansion.md` From 215e8042ca0433b5d2043e65aeae7ed41a40579b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:25:15 -0800 Subject: [PATCH 05/60] gnd: Restructure CLI with subcommands for graph-cli compatibility - Move dev command logic to commands/dev.rs module - Add subcommand structure with stubs for all graph-cli commands: codegen, build, deploy, init, add, create, remove, auth, publish, test, clean - Add version output showing graph-cli compatibility version (0.98.1) - Preserve all existing dev command functionality and options This is Phase 1 of the gnd CLI expansion to become a drop-in replacement for the TypeScript graph-cli. Co-Authored-By: Claude Opus 4.5 --- gnd/src/commands/dev.rs | 304 +++++++++++++++++++++++++++++++ gnd/src/commands/mod.rs | 3 + gnd/src/lib.rs | 1 + gnd/src/main.rs | 393 ++++++++++++---------------------------- 4 files changed, 421 insertions(+), 280 deletions(-) create mode 100644 gnd/src/commands/dev.rs create mode 100644 gnd/src/commands/mod.rs diff --git a/gnd/src/commands/dev.rs b/gnd/src/commands/dev.rs new file mode 100644 index 00000000000..ae5c57de111 --- /dev/null +++ b/gnd/src/commands/dev.rs @@ -0,0 +1,304 @@ +use std::{path::Path, sync::Arc}; + +use anyhow::{Context, Result}; +use clap::Parser; +use graph::{ + components::link_resolver::FileLinkResolver, + env::EnvVars, + prelude::{CheapClone, DeploymentHash, LinkResolver, SubgraphName}, + slog::{error, info, Logger}, +}; +use graph_core::polling_monitor::ipfs_service; +use graph_node::{launcher, opt::Opt}; +use tokio::sync::mpsc; +use tokio_util::sync::CancellationToken; + +use crate::watcher::{deploy_all_subgraphs, parse_manifest_args, watch_subgraphs}; + +#[cfg(unix)] +use pgtemp::{PgTempDB, PgTempDBBuilder}; + +// Add an alias for the temporary Postgres DB handle. On non unix +// targets we don't have pgtemp, but we still need the type to satisfy the +// function signatures. +#[cfg(unix)] +type TempPgDB = PgTempDB; +#[cfg(not(unix))] +type TempPgDB = (); + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Run graph-node in dev mode")] +pub struct DevOpt { + #[clap( + long, + help = "Start a graph-node in dev mode watching a build directory for changes" + )] + pub watch: bool, + + #[clap( + long, + value_name = "MANIFEST:[BUILD_DIR]", + help = "The location of the subgraph manifest file. If no build directory is provided, the default is 'build'. 
The file can be an alias, in the format '[BUILD_DIR:]manifest' where 'manifest' is the path to the manifest file, and 'BUILD_DIR' is the path to the build directory relative to the manifest file.", + default_value = "./subgraph.yaml", + value_delimiter = ',' + )] + pub manifests: Vec, + + #[clap( + long, + value_name = "ALIAS:MANIFEST:[BUILD_DIR]", + value_delimiter = ',', + help = "The location of the source subgraph manifest files. This is used to resolve aliases in the manifest files for subgraph data sources. The format is ALIAS:MANIFEST:[BUILD_DIR], where ALIAS is the alias name, BUILD_DIR is the build directory relative to the manifest file, and MANIFEST is the manifest file location." + )] + pub sources: Vec, + + #[clap( + long, + help = "The location of the database directory.", + default_value = "./build" + )] + pub database_dir: String, + + #[clap( + long, + value_name = "URL", + env = "POSTGRES_URL", + help = "Location of the Postgres database used for storing entities" + )] + pub postgres_url: Option, + + #[clap( + long, + allow_negative_numbers = false, + value_name = "NETWORK_NAME:[CAPABILITIES]:URL", + env = "ETHEREUM_RPC", + help = "Ethereum network name (e.g. 'mainnet'), optional comma-seperated capabilities (eg 'full,archive'), and an Ethereum RPC URL, separated by a ':'" + )] + pub ethereum_rpc: Vec, + + #[clap( + long, + value_name = "HOST:PORT", + env = "IPFS", + help = "HTTP addresses of IPFS servers (RPC, Gateway)", + default_value = "https://api.thegraph.com/ipfs" + )] + pub ipfs: Vec, + #[clap( + long, + default_value = "8000", + value_name = "PORT", + help = "Port for the GraphQL HTTP server", + env = "GRAPH_GRAPHQL_HTTP_PORT" + )] + pub http_port: u16, + #[clap( + long, + default_value = "8030", + value_name = "PORT", + help = "Port for the index node server" + )] + pub index_node_port: u16, + #[clap( + long, + default_value = "8020", + value_name = "PORT", + help = "Port for the JSON-RPC admin server" + )] + pub admin_port: u16, + #[clap( + long, + default_value = "8040", + value_name = "PORT", + help = "Port for the Prometheus metrics server" + )] + pub metrics_port: u16, +} + +/// Builds the Graph Node options from DevOpt +fn build_args(dev_opt: &DevOpt, db_url: &str) -> Result { + let mut args = vec!["gnd".to_string()]; + + if !dev_opt.ipfs.is_empty() { + args.push("--ipfs".to_string()); + args.push(dev_opt.ipfs.join(",")); + } + + if !dev_opt.ethereum_rpc.is_empty() { + args.push("--ethereum-rpc".to_string()); + args.push(dev_opt.ethereum_rpc.join(",")); + } + + args.push("--postgres-url".to_string()); + args.push(db_url.to_string()); + + let mut opt = Opt::parse_from(args); + + opt.http_port = dev_opt.http_port; + opt.admin_port = dev_opt.admin_port; + opt.metrics_port = dev_opt.metrics_port; + opt.index_node_port = dev_opt.index_node_port; + + Ok(opt) +} + +async fn run_graph_node( + logger: &Logger, + opt: Opt, + link_resolver: Arc, + subgraph_updates_channel: mpsc::Receiver<(DeploymentHash, SubgraphName)>, + cancel_token: CancellationToken, +) -> Result<()> { + let env_vars = Arc::new(EnvVars::from_env().context("Failed to load environment variables")?); + + let (prometheus_registry, metrics_registry) = launcher::setup_metrics(logger); + + let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, logger) + .await + .unwrap_or_else(|err| panic!("Failed to create IPFS client: {err:#}")); + + let ipfs_service = ipfs_service( + ipfs_client.cheap_clone(), + env_vars.mappings.max_ipfs_file_bytes, + env_vars.mappings.ipfs_timeout, + 
env_vars.mappings.ipfs_request_limit, + ); + + launcher::run( + logger.clone(), + opt, + env_vars, + ipfs_service, + link_resolver, + Some(subgraph_updates_channel), + prometheus_registry, + metrics_registry, + cancel_token, + ) + .await; + Ok(()) +} + +/// Get the database URL, either from the provided option or by creating a temporary database +fn get_database_url( + postgres_url: Option<&String>, + database_dir: &Path, +) -> Result<(String, Option)> { + if let Some(url) = postgres_url { + Ok((url.clone(), None)) + } else { + #[cfg(unix)] + { + // Check the database directory exists + if !database_dir.exists() { + anyhow::bail!( + "Database directory does not exist: {}", + database_dir.display() + ); + } + + let db = PgTempDBBuilder::new() + .with_data_dir_prefix(database_dir) + .persist_data(false) + .with_initdb_arg("-E", "UTF8") + .with_initdb_arg("--locale", "C") + .start(); + let url = db.connection_uri().to_string(); + // Return the handle so it lives for the lifetime of the program; dropping it will + // shut down Postgres and remove the temporary directory automatically. + Ok((url, Some(db))) + } + + #[cfg(not(unix))] + { + let _ = database_dir; // Suppress unused warning + anyhow::bail!( + "Please provide a postgres_url manually using the --postgres-url option." + ); + } + } +} + +/// Run the dev command +pub async fn run_dev( + dev_opt: DevOpt, + logger: Logger, + cancel_token: CancellationToken, +) -> Result<()> { + let database_dir = Path::new(&dev_opt.database_dir); + + info!(logger, "Starting Graph Node Dev"); + info!(logger, "Database directory: {}", database_dir.display()); + + // Get the database URL and keep the temporary database handle alive for the life of the + // program so that it is dropped (and cleaned up) on graceful shutdown. + let (db_url, mut temp_db_opt) = get_database_url(dev_opt.postgres_url.as_ref(), database_dir)?; + + let opt = build_args(&dev_opt, &db_url)?; + + let (manifests_paths, source_subgraph_aliases): ( + Vec, + std::collections::HashMap, + ) = parse_manifest_args(dev_opt.manifests.clone(), dev_opt.sources.clone(), &logger)?; + let file_link_resolver = Arc::new(FileLinkResolver::new(None, source_subgraph_aliases.clone())); + + let (tx, rx) = mpsc::channel(1); + + let logger_clone = logger.clone(); + let cancel_token_clone = cancel_token.clone(); + graph::spawn(async move { + let _ = run_graph_node( + &logger_clone, + opt, + file_link_resolver, + rx, + cancel_token_clone, + ) + .await; + }); + + let deploy_result: anyhow::Result<()> = + deploy_all_subgraphs(&logger, &manifests_paths, &source_subgraph_aliases, &tx).await; + if let Err(e) = deploy_result { + let error_msg = e.to_string(); + error!(logger, "Error deploying subgraphs"; "error" => error_msg); + std::process::exit(1); + } + + if dev_opt.watch { + let logger_clone_watch = logger.clone(); + graph::spawn_blocking(async move { + let watch_result: anyhow::Result<()> = watch_subgraphs( + &logger_clone_watch, + manifests_paths, + source_subgraph_aliases, + vec!["pgtemp-*".to_string()], + tx, + ) + .await; + if let Err(e) = watch_result { + let error_msg = e.to_string(); + error!(logger_clone_watch, "Error watching subgraphs"; "error" => error_msg); + std::process::exit(1); + } + }); + } + + // Wait for Ctrl+C so we can shut down cleanly and drop the temporary database, which removes + // the data directory. 
+ tokio::signal::ctrl_c() + .await + .expect("Failed to listen for Ctrl+C signal"); + info!(logger, "Received Ctrl+C, shutting down."); + + // Explicitly shut down and clean up the temporary database directory if we started one. + #[cfg(unix)] + if let Some(db) = temp_db_opt.take() { + db.shutdown(); + } + + #[cfg(not(unix))] + let _ = temp_db_opt; // Suppress unused warning + + Ok(()) +} diff --git a/gnd/src/commands/mod.rs b/gnd/src/commands/mod.rs new file mode 100644 index 00000000000..dcccc5c58d3 --- /dev/null +++ b/gnd/src/commands/mod.rs @@ -0,0 +1,3 @@ +mod dev; + +pub use dev::{run_dev, DevOpt}; diff --git a/gnd/src/lib.rs b/gnd/src/lib.rs index 887d28c69de..84de364e781 100644 --- a/gnd/src/lib.rs +++ b/gnd/src/lib.rs @@ -1 +1,2 @@ +pub mod commands; pub mod watcher; diff --git a/gnd/src/main.rs b/gnd/src/main.rs index fc79c707310..a2927810b3d 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -1,310 +1,80 @@ -use std::{path::Path, sync::Arc}; - -use anyhow::{Context, Result}; -use clap::Parser; +use anyhow::Result; +use clap::{Parser, Subcommand}; use git_testament::{git_testament, render_testament}; -use graph::{ - components::link_resolver::FileLinkResolver, - env::EnvVars, - log::logger, - prelude::{CheapClone, DeploymentHash, LinkResolver, SubgraphName}, - slog::{error, info, Logger}, -}; -use graph_core::polling_monitor::ipfs_service; -use graph_node::{launcher, opt::Opt}; +use graph::{log::logger, slog::info}; use lazy_static::lazy_static; -use tokio::{self, sync::mpsc}; use tokio_util::sync::CancellationToken; -use gnd::watcher::{deploy_all_subgraphs, parse_manifest_args, watch_subgraphs}; - -#[cfg(unix)] -use pgtemp::{PgTempDB, PgTempDBBuilder}; - -// Add an alias for the temporary Postgres DB handle. On non unix -// targets we don't have pgtemp, but we still need the type to satisfy the -// function signatures. -#[cfg(unix)] -type TempPgDB = PgTempDB; -#[cfg(not(unix))] -type TempPgDB = (); +use gnd::commands::{run_dev, DevOpt}; git_testament!(TESTAMENT); lazy_static! { static ref RENDERED_TESTAMENT: String = render_testament!(TESTAMENT); + static ref VERSION_STRING: String = format!( + "{} (graph-cli compatible: {})", + RENDERED_TESTAMENT.as_str(), + GRAPH_CLI_COMPAT_VERSION + ); + static ref LONG_VERSION_STRING: String = format!( + "{}\ngraph-cli compatibility version: {}", + RENDERED_TESTAMENT.as_str(), + GRAPH_CLI_COMPAT_VERSION + ); } -#[derive(Clone, Debug, Parser)] +/// The version of graph-cli that gnd emulates +const GRAPH_CLI_COMPAT_VERSION: &str = "0.98.1"; + +#[derive(Parser)] #[clap( name = "gnd", - about = "Graph Node Dev", + about = "Graph Node Dev - A drop-in replacement for graph-cli", author = "Graph Protocol, Inc.", - version = RENDERED_TESTAMENT.as_str() + version = VERSION_STRING.as_str(), + long_version = LONG_VERSION_STRING.as_str(), )] -pub struct DevOpt { - #[clap( - long, - help = "Start a graph-node in dev mode watching a build directory for changes" - )] - pub watch: bool, - - #[clap( - long, - value_name = "MANIFEST:[BUILD_DIR]", - help = "The location of the subgraph manifest file. If no build directory is provided, the default is 'build'. 
The file can be an alias, in the format '[BUILD_DIR:]manifest' where 'manifest' is the path to the manifest file, and 'BUILD_DIR' is the path to the build directory relative to the manifest file.", - default_value = "./subgraph.yaml", - value_delimiter = ',' - )] - pub manifests: Vec, - - #[clap( - long, - value_name = "ALIAS:MANIFEST:[BUILD_DIR]", - value_delimiter = ',', - help = "The location of the source subgraph manifest files. This is used to resolve aliases in the manifest files for subgraph data sources. The format is ALIAS:MANIFEST:[BUILD_DIR], where ALIAS is the alias name, BUILD_DIR is the build directory relative to the manifest file, and MANIFEST is the manifest file location." - )] - pub sources: Vec, - - #[clap( - long, - help = "The location of the database directory.", - default_value = "./build" - )] - pub database_dir: String, - - #[clap( - long, - value_name = "URL", - env = "POSTGRES_URL", - help = "Location of the Postgres database used for storing entities" - )] - pub postgres_url: Option, - - #[clap( - long, - allow_negative_numbers = false, - value_name = "NETWORK_NAME:[CAPABILITIES]:URL", - env = "ETHEREUM_RPC", - help = "Ethereum network name (e.g. 'mainnet'), optional comma-seperated capabilities (eg 'full,archive'), and an Ethereum RPC URL, separated by a ':'" - )] - pub ethereum_rpc: Vec, - - #[clap( - long, - value_name = "HOST:PORT", - env = "IPFS", - help = "HTTP addresses of IPFS servers (RPC, Gateway)", - default_value = "https://api.thegraph.com/ipfs" - )] - pub ipfs: Vec, - #[clap( - long, - default_value = "8000", - value_name = "PORT", - help = "Port for the GraphQL HTTP server", - env = "GRAPH_GRAPHQL_HTTP_PORT" - )] - pub http_port: u16, - #[clap( - long, - default_value = "8030", - value_name = "PORT", - help = "Port for the index node server" - )] - pub index_node_port: u16, - #[clap( - long, - default_value = "8020", - value_name = "PORT", - help = "Port for the JSON-RPC admin server" - )] - pub admin_port: u16, - #[clap( - long, - default_value = "8040", - value_name = "PORT", - help = "Port for the Prometheus metrics server" - )] - pub metrics_port: u16, -} - -/// Builds the Graph Node options from DevOpt -fn build_args(dev_opt: &DevOpt, db_url: &str) -> Result { - let mut args = vec!["gnd".to_string()]; - - if !dev_opt.ipfs.is_empty() { - args.push("--ipfs".to_string()); - args.push(dev_opt.ipfs.join(",")); - } - - if !dev_opt.ethereum_rpc.is_empty() { - args.push("--ethereum-rpc".to_string()); - args.push(dev_opt.ethereum_rpc.join(",")); - } - - args.push("--postgres-url".to_string()); - args.push(db_url.to_string()); - - let mut opt = Opt::parse_from(args); - - opt.http_port = dev_opt.http_port; - opt.admin_port = dev_opt.admin_port; - opt.metrics_port = dev_opt.metrics_port; - opt.index_node_port = dev_opt.index_node_port; - - Ok(opt) -} - -async fn run_graph_node( - logger: &Logger, - opt: Opt, - link_resolver: Arc, - subgraph_updates_channel: mpsc::Receiver<(DeploymentHash, SubgraphName)>, - cancel_token: CancellationToken, -) -> Result<()> { - let env_vars = Arc::new(EnvVars::from_env().context("Failed to load environment variables")?); - - let (prometheus_registry, metrics_registry) = launcher::setup_metrics(logger); - - let ipfs_client = graph::ipfs::new_ipfs_client(&opt.ipfs, &metrics_registry, logger) - .await - .unwrap_or_else(|err| panic!("Failed to create IPFS client: {err:#}")); - - let ipfs_service = ipfs_service( - ipfs_client.cheap_clone(), - env_vars.mappings.max_ipfs_file_bytes, - env_vars.mappings.ipfs_timeout, - 
env_vars.mappings.ipfs_request_limit, - ); - - launcher::run( - logger.clone(), - opt, - env_vars, - ipfs_service, - link_resolver, - Some(subgraph_updates_channel), - prometheus_registry, - metrics_registry, - cancel_token, - ) - .await; - Ok(()) +struct Cli { + #[command(subcommand)] + command: Commands, } -/// Get the database URL, either from the provided option or by creating a temporary database -fn get_database_url( - postgres_url: Option<&String>, - database_dir: &Path, -) -> Result<(String, Option)> { - if let Some(url) = postgres_url { - Ok((url.clone(), None)) - } else { - #[cfg(unix)] - { - // Check the database directory exists - if !database_dir.exists() { - anyhow::bail!( - "Database directory does not exist: {}", - database_dir.display() - ); - } - - let db = PgTempDBBuilder::new() - .with_data_dir_prefix(database_dir) - .persist_data(false) - .with_initdb_arg("-E", "UTF8") - .with_initdb_arg("--locale", "C") - .start(); - let url = db.connection_uri().to_string(); - // Return the handle so it lives for the lifetime of the program; dropping it will - // shut down Postgres and remove the temporary directory automatically. - Ok((url, Some(db))) - } - - #[cfg(not(unix))] - { - anyhow::bail!( - "Please provide a postgres_url manually using the --postgres-url option." - ); - } - } -} - -#[tokio::main] -async fn main() -> Result<()> { - std::env::set_var("ETHEREUM_REORG_THRESHOLD", "10"); - std::env::set_var("GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION", "true"); - env_logger::init(); - let dev_opt = DevOpt::parse(); - - let database_dir = Path::new(&dev_opt.database_dir); +#[derive(Subcommand)] +enum Commands { + /// Run graph-node in dev mode + Dev(DevOpt), - let cancel_token = shutdown_token(); - let logger = logger(true); - - info!(logger, "Starting Graph Node Dev 1"); - info!(logger, "Database directory: {}", database_dir.display()); + /// Generate AssemblyScript types from subgraph manifest + Codegen, - // Get the database URL and keep the temporary database handle alive for the life of the - // program so that it is dropped (and cleaned up) on graceful shutdown. 
- let (db_url, mut temp_db_opt) = get_database_url(dev_opt.postgres_url.as_ref(), database_dir)?; + /// Compile subgraph to WASM + Build, - let opt = build_args(&dev_opt, &db_url)?; + /// Deploy subgraph to a Graph Node + Deploy, - let (manifests_paths, source_subgraph_aliases) = - parse_manifest_args(dev_opt.manifests, dev_opt.sources, &logger)?; - let file_link_resolver = Arc::new(FileLinkResolver::new(None, source_subgraph_aliases.clone())); + /// Scaffold a new subgraph project + Init, - let (tx, rx) = mpsc::channel(1); - - let logger_clone = logger.clone(); - graph::spawn(async move { - let _ = run_graph_node(&logger_clone, opt, file_link_resolver, rx, cancel_token).await; - }); + /// Add a datasource to an existing subgraph + Add, - if let Err(e) = - deploy_all_subgraphs(&logger, &manifests_paths, &source_subgraph_aliases, &tx).await - { - error!(logger, "Error deploying subgraphs"; "error" => e.to_string()); - std::process::exit(1); - } + /// Register a subgraph name with a Graph Node + Create, - if dev_opt.watch { - let logger_clone_watch = logger.clone(); - graph::spawn_blocking(async move { - if let Err(e) = watch_subgraphs( - &logger_clone_watch, - manifests_paths, - source_subgraph_aliases, - vec!["pgtemp-*".to_string()], - tx, - ) - .await - { - error!(logger_clone_watch, "Error watching subgraphs"; "error" => e.to_string()); - std::process::exit(1); - } - }); - } + /// Unregister a subgraph name from a Graph Node + Remove, - // Wait for Ctrl+C so we can shut down cleanly and drop the temporary database, which removes - // the data directory. - tokio::signal::ctrl_c() - .await - .expect("Failed to listen for Ctrl+C signal"); - info!(logger, "Received Ctrl+C, shutting down."); + /// Set the deploy key for a Graph Node + Auth, - // Explicitly shut down and clean up the temporary database directory if we started one. 
- #[cfg(unix)] - if let Some(db) = temp_db_opt.take() { - db.shutdown(); - } + /// Publish subgraph to The Graph's decentralized network + Publish, - std::process::exit(0); + /// Run Matchstick tests + Test, - #[allow(unreachable_code)] - Ok(()) + /// Remove build artifacts and generated files + Clean, } fn shutdown_token() -> CancellationToken { @@ -342,3 +112,66 @@ fn shutdown_token() -> CancellationToken { cancel_token } + +#[tokio::main] +async fn main() -> Result<()> { + std::env::set_var("ETHEREUM_REORG_THRESHOLD", "10"); + std::env::set_var("GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION", "true"); + env_logger::init(); + + let cli = Cli::parse(); + let logger = logger(true); + let cancel_token = shutdown_token(); + + match cli.command { + Commands::Dev(dev_opt) => { + run_dev(dev_opt, logger, cancel_token).await?; + } + Commands::Codegen => { + info!(logger, "codegen command not yet implemented"); + std::process::exit(1); + } + Commands::Build => { + info!(logger, "build command not yet implemented"); + std::process::exit(1); + } + Commands::Deploy => { + info!(logger, "deploy command not yet implemented"); + std::process::exit(1); + } + Commands::Init => { + info!(logger, "init command not yet implemented"); + std::process::exit(1); + } + Commands::Add => { + info!(logger, "add command not yet implemented"); + std::process::exit(1); + } + Commands::Create => { + info!(logger, "create command not yet implemented"); + std::process::exit(1); + } + Commands::Remove => { + info!(logger, "remove command not yet implemented"); + std::process::exit(1); + } + Commands::Auth => { + info!(logger, "auth command not yet implemented"); + std::process::exit(1); + } + Commands::Publish => { + info!(logger, "publish command not yet implemented"); + std::process::exit(1); + } + Commands::Test => { + info!(logger, "test command not yet implemented"); + std::process::exit(1); + } + Commands::Clean => { + info!(logger, "clean command not yet implemented"); + std::process::exit(1); + } + } + + Ok(()) +} From 4179181e33baeab40533f0f0195f14bebdbe21fe Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:27:51 -0800 Subject: [PATCH 06/60] gnd: Implement clean command Add the `gnd clean` command to remove build artifacts and generated files. 
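As an illustration of the intended invocation (flag names and defaults are taken from the CleanOpt struct added below):

    gnd clean
    gnd clean --codegen-dir generated/ --build-dir build/
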
- Removes `generated/` and `build/` directories by default - Supports custom paths via --codegen-dir and --build-dir flags - Includes unit tests for both success and missing directory cases - Matches graph-cli clean command behavior Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 1 + gnd/Cargo.toml | 3 ++ gnd/src/commands/clean.rs | 102 ++++++++++++++++++++++++++++++++++++++ gnd/src/commands/mod.rs | 2 + gnd/src/main.rs | 17 ++++--- 5 files changed, 118 insertions(+), 7 deletions(-) create mode 100644 gnd/src/commands/clean.rs diff --git a/Cargo.lock b/Cargo.lock index fe1efe36a6f..e03ea3ae41d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3676,6 +3676,7 @@ dependencies = [ "notify", "pgtemp", "pq-sys", + "tempfile", "tokio", "tokio-util 0.7.18", ] diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index f8748246d64..d4828879529 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -29,3 +29,6 @@ pq-sys = { version = "0.7.5", features = ["bundled"] } [target.'cfg(unix)'.dependencies] pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-args" } + +[dev-dependencies] +tempfile = "3" diff --git a/gnd/src/commands/clean.rs b/gnd/src/commands/clean.rs new file mode 100644 index 00000000000..3968532b917 --- /dev/null +++ b/gnd/src/commands/clean.rs @@ -0,0 +1,102 @@ +use std::path::Path; + +use anyhow::Result; +use clap::Parser; + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Remove build artifacts and generated files")] +pub struct CleanOpt { + #[clap( + long, + default_value = "generated/", + help = "Directory where the codegen output is stored" + )] + pub codegen_dir: String, + + #[clap( + long, + default_value = "build/", + help = "Directory where the build output is stored" + )] + pub build_dir: String, +} + +/// Run the clean command +pub fn run_clean(opt: CleanOpt) -> Result<()> { + println!("Cleaning cache and generated files..."); + + let codegen_path = Path::new(&opt.codegen_dir); + let build_path = Path::new(&opt.build_dir); + + let mut cleaned = false; + + if codegen_path.exists() { + std::fs::remove_dir_all(codegen_path)?; + println!("✔ Removed {}", opt.codegen_dir); + cleaned = true; + } + + if build_path.exists() { + std::fs::remove_dir_all(build_path)?; + println!("✔ Removed {}", opt.build_dir); + cleaned = true; + } + + if cleaned { + println!("Cache and generated files cleaned"); + } else { + println!("Nothing to clean"); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + #[test] + fn test_clean_removes_directories() { + let temp_dir = TempDir::new().unwrap(); + let codegen_dir = temp_dir.path().join("generated"); + let build_dir = temp_dir.path().join("build"); + + // Create the directories + fs::create_dir(&codegen_dir).unwrap(); + fs::create_dir(&build_dir).unwrap(); + + // Create some files in them + fs::write(codegen_dir.join("schema.ts"), "test").unwrap(); + fs::write(build_dir.join("subgraph.wasm"), "test").unwrap(); + + assert!(codegen_dir.exists()); + assert!(build_dir.exists()); + + let opt = CleanOpt { + codegen_dir: codegen_dir.to_string_lossy().to_string(), + build_dir: build_dir.to_string_lossy().to_string(), + }; + + run_clean(opt).unwrap(); + + assert!(!codegen_dir.exists()); + assert!(!build_dir.exists()); + } + + #[test] + fn test_clean_handles_missing_directories() { + let temp_dir = TempDir::new().unwrap(); + let codegen_dir = temp_dir.path().join("nonexistent_generated"); + let build_dir = temp_dir.path().join("nonexistent_build"); + + let opt = CleanOpt { + codegen_dir: 
codegen_dir.to_string_lossy().to_string(), + build_dir: build_dir.to_string_lossy().to_string(), + }; + + // Should not error when directories don't exist + run_clean(opt).unwrap(); + } +} diff --git a/gnd/src/commands/mod.rs b/gnd/src/commands/mod.rs index dcccc5c58d3..e9d1cdc0eaf 100644 --- a/gnd/src/commands/mod.rs +++ b/gnd/src/commands/mod.rs @@ -1,3 +1,5 @@ +mod clean; mod dev; +pub use clean::{run_clean, CleanOpt}; pub use dev::{run_dev, DevOpt}; diff --git a/gnd/src/main.rs b/gnd/src/main.rs index a2927810b3d..fd524fb572a 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -5,7 +5,7 @@ use graph::{log::logger, slog::info}; use lazy_static::lazy_static; use tokio_util::sync::CancellationToken; -use gnd::commands::{run_dev, DevOpt}; +use gnd::commands::{run_clean, run_dev, CleanOpt, DevOpt}; git_testament!(TESTAMENT); lazy_static! { @@ -74,7 +74,7 @@ enum Commands { Test, /// Remove build artifacts and generated files - Clean, + Clean(CleanOpt), } fn shutdown_token() -> CancellationToken { @@ -123,9 +123,10 @@ async fn main() -> Result<()> { let logger = logger(true); let cancel_token = shutdown_token(); - match cli.command { + let res = match cli.command { Commands::Dev(dev_opt) => { run_dev(dev_opt, logger, cancel_token).await?; + Ok(()) } Commands::Codegen => { info!(logger, "codegen command not yet implemented"); @@ -167,10 +168,12 @@ async fn main() -> Result<()> { info!(logger, "test command not yet implemented"); std::process::exit(1); } - Commands::Clean => { - info!(logger, "clean command not yet implemented"); - std::process::exit(1); - } + Commands::Clean(clean_opt) => run_clean(clean_opt), + }; + + if let Err(e) = res { + eprintln!("Error: {}", e); + std::process::exit(1); } Ok(()) From 63c74edaf5a418618ff92251c6cd760c4e519b4a Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:31:27 -0800 Subject: [PATCH 07/60] gnd: Implement auth command Add the `gnd auth` command to save deploy keys for Graph Node authentication. 
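Illustrative invocations (the deploy key and URL are placeholders; the flags match the AuthOpt struct added below):

    gnd auth <deploy-key>
    gnd auth --node http://localhost:8020 <deploy-key>

The first form stores the key under the default Subgraph Studio deploy URL.
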
- Stores keys in ~/.graph-cli.json (compatible with TS graph-cli) - Supports custom node URLs via --node flag - Defaults to Subgraph Studio URL - Validates Studio deploy key format (32 hex chars) - Includes unit tests for key storage and retrieval Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 3 + gnd/Cargo.toml | 5 ++ gnd/src/commands/auth.rs | 177 +++++++++++++++++++++++++++++++++++++++ gnd/src/commands/mod.rs | 2 + gnd/src/main.rs | 9 +- 5 files changed, 190 insertions(+), 6 deletions(-) create mode 100644 gnd/src/commands/auth.rs diff --git a/Cargo.lock b/Cargo.lock index e03ea3ae41d..0edb5d937d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3666,6 +3666,7 @@ version = "0.36.0" dependencies = [ "anyhow", "clap", + "dirs", "env_logger", "git-testament", "globset", @@ -3676,9 +3677,11 @@ dependencies = [ "notify", "pgtemp", "pq-sys", + "serde_json", "tempfile", "tokio", "tokio-util 0.7.18", + "url", ] [[package]] diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index d4828879529..e3941fd7404 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -25,6 +25,11 @@ tokio-util.workspace = true # File watching notify = "8.2.0" globset = "0.4.18" + +# Config and auth +url = "2" +dirs = "5" +serde_json = { workspace = true } pq-sys = { version = "0.7.5", features = ["bundled"] } [target.'cfg(unix)'.dependencies] diff --git a/gnd/src/commands/auth.rs b/gnd/src/commands/auth.rs new file mode 100644 index 00000000000..eaca1555b6c --- /dev/null +++ b/gnd/src/commands/auth.rs @@ -0,0 +1,177 @@ +use std::collections::HashMap; +use std::fs; +use std::path::PathBuf; + +use anyhow::{Context, Result}; +use clap::Parser; +use url::Url; + +/// Default Subgraph Studio deploy URL +const SUBGRAPH_STUDIO_URL: &str = "https://api.studio.thegraph.com/deploy/"; + +/// Get the path to the config file (~/.graph-cli.json) +fn config_path() -> PathBuf { + dirs::home_dir() + .expect("Could not determine home directory") + .join(".graph-cli.json") +} + +/// Normalize a node URL by parsing and re-serializing it +fn normalize_node_url(node: &str) -> Result { + let url = Url::parse(node).context("Invalid node URL")?; + Ok(url.to_string()) +} + +/// Load the config file from a specific path, returning an empty map if it doesn't exist +fn load_config_from(path: &PathBuf) -> Result> { + if !path.exists() { + return Ok(HashMap::new()); + } + + let content = fs::read_to_string(path) + .with_context(|| format!("Failed to read config file: {}", path.display()))?; + + serde_json::from_str(&content) + .with_context(|| format!("Failed to parse config file: {}", path.display())) +} + +/// Save the config file to a specific path +fn save_config_to(path: &PathBuf, config: &HashMap) -> Result<()> { + let content = serde_json::to_string(config).context("Failed to serialize config")?; + fs::write(path, content) + .with_context(|| format!("Failed to write config file: {}", path.display())) +} + +/// Save a deploy key for a node (uses default config path) +pub fn save_deploy_key(node: &str, deploy_key: &str) -> Result<()> { + save_deploy_key_to(&config_path(), node, deploy_key) +} + +/// Save a deploy key for a node to a specific config file +fn save_deploy_key_to(config_file: &PathBuf, node: &str, deploy_key: &str) -> Result<()> { + let normalized_node = normalize_node_url(node)?; + let mut config = load_config_from(config_file)?; + config.insert(normalized_node, deploy_key.to_string()); + save_config_to(config_file, &config) +} + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Set the deploy key for a Graph Node")] +pub struct AuthOpt { + /// 
The deploy key to store + #[clap(value_name = "DEPLOY_KEY")] + pub deploy_key: String, + + /// The Graph Node URL to authenticate with + #[clap( + long, + short = 'g', + value_name = "URL", + default_value = SUBGRAPH_STUDIO_URL, + help = "Graph Node URL" + )] + pub node: String, +} + +/// Validate that a deploy key looks like a valid Subgraph Studio key (32 hex chars) +fn is_valid_studio_key(key: &str) -> bool { + key.len() == 32 && key.chars().all(|c| c.is_ascii_hexdigit()) +} + +/// Run the auth command +pub fn run_auth(opt: AuthOpt) -> Result<()> { + // Validate the deploy key format for Studio + if opt.node == SUBGRAPH_STUDIO_URL && !is_valid_studio_key(&opt.deploy_key) { + println!( + "Warning: Deploy key doesn't look like a valid Subgraph Studio key (expected 32 hex characters)" + ); + } + + save_deploy_key(&opt.node, &opt.deploy_key)?; + + let normalized_node = normalize_node_url(&opt.node)?; + println!("✔ Deploy key set for {}", normalized_node); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Get the deploy key for a node from a specific config file + fn get_deploy_key_from(config_file: &PathBuf, node: &str) -> Result> { + let normalized_node = normalize_node_url(node)?; + let config = load_config_from(config_file)?; + Ok(config.get(&normalized_node).cloned()) + } + + #[test] + fn test_normalize_node_url() { + assert_eq!( + normalize_node_url("https://example.com").unwrap(), + "https://example.com/" + ); + assert_eq!( + normalize_node_url("https://example.com/").unwrap(), + "https://example.com/" + ); + assert_eq!( + normalize_node_url("https://api.studio.thegraph.com/deploy/").unwrap(), + "https://api.studio.thegraph.com/deploy/" + ); + } + + #[test] + fn test_is_valid_studio_key() { + assert!(is_valid_studio_key("0123456789abcdef0123456789abcdef")); + assert!(is_valid_studio_key("ABCDEF0123456789abcdef0123456789")); + assert!(!is_valid_studio_key("too-short")); + assert!(!is_valid_studio_key("0123456789abcdef0123456789abcdefXX")); // too long + assert!(!is_valid_studio_key("0123456789abcdef0123456789abcdeg")); // invalid char + } + + #[test] + fn test_save_and_get_deploy_key() { + let temp_dir = tempfile::TempDir::new().unwrap(); + let config_file = temp_dir.path().join(".graph-cli.json"); + + let node = "https://example.com/deploy/"; + let key = "test-deploy-key-12345"; + + // Initially no key + assert!(get_deploy_key_from(&config_file, node).unwrap().is_none()); + + // Save key + save_deploy_key_to(&config_file, node, key).unwrap(); + + // Get key back + assert_eq!( + get_deploy_key_from(&config_file, node).unwrap(), + Some(key.to_string()) + ); + } + + #[test] + fn test_multiple_nodes() { + let temp_dir = tempfile::TempDir::new().unwrap(); + let config_file = temp_dir.path().join(".graph-cli.json"); + + let node1 = "https://node1.example.com/"; + let node2 = "https://node2.example.com/"; + let key1 = "key1"; + let key2 = "key2"; + + save_deploy_key_to(&config_file, node1, key1).unwrap(); + save_deploy_key_to(&config_file, node2, key2).unwrap(); + + assert_eq!( + get_deploy_key_from(&config_file, node1).unwrap(), + Some(key1.to_string()) + ); + assert_eq!( + get_deploy_key_from(&config_file, node2).unwrap(), + Some(key2.to_string()) + ); + } +} diff --git a/gnd/src/commands/mod.rs b/gnd/src/commands/mod.rs index e9d1cdc0eaf..2d433e0b348 100644 --- a/gnd/src/commands/mod.rs +++ b/gnd/src/commands/mod.rs @@ -1,5 +1,7 @@ +mod auth; mod clean; mod dev; +pub use auth::{run_auth, AuthOpt}; pub use clean::{run_clean, CleanOpt}; pub use dev::{run_dev, DevOpt}; diff 
--git a/gnd/src/main.rs b/gnd/src/main.rs index fd524fb572a..e2711478b25 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -5,7 +5,7 @@ use graph::{log::logger, slog::info}; use lazy_static::lazy_static; use tokio_util::sync::CancellationToken; -use gnd::commands::{run_clean, run_dev, CleanOpt, DevOpt}; +use gnd::commands::{run_auth, run_clean, run_dev, AuthOpt, CleanOpt, DevOpt}; git_testament!(TESTAMENT); lazy_static! { @@ -65,7 +65,7 @@ enum Commands { Remove, /// Set the deploy key for a Graph Node - Auth, + Auth(AuthOpt), /// Publish subgraph to The Graph's decentralized network Publish, @@ -156,10 +156,7 @@ async fn main() -> Result<()> { info!(logger, "remove command not yet implemented"); std::process::exit(1); } - Commands::Auth => { - info!(logger, "auth command not yet implemented"); - std::process::exit(1); - } + Commands::Auth(auth_opt) => run_auth(auth_opt), Commands::Publish => { info!(logger, "publish command not yet implemented"); std::process::exit(1); From 8f74fd28a200532757f107c280306b061b30589b Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:45:08 -0800 Subject: [PATCH 08/60] gitignore: Ignore ralph-loop in .claude --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 038afe1d530..a55f2fbb4c7 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,4 @@ logfile # Local claude settings .claude/settings.local.json +.claude/ralph-loop.local.md From 66508bb39535c41cdbb70571e77875b0e5948fad Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:51:06 -0800 Subject: [PATCH 09/60] gnd: Add services module with GraphNodeClient Add a JSON-RPC client for communicating with Graph Node's admin API. This client is used by the create and remove commands to register and unregister subgraph names. 
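A minimal usage sketch (illustrative only; the URL, token, and subgraph name are placeholders):

    use gnd::services::GraphNodeClient;

    async fn register(name: &str) -> Result<(), Box<dyn std::error::Error>> {
        // The access token is optional; pass None for unauthenticated nodes.
        let client = GraphNodeClient::new("http://localhost:8020", Some("my-token"))?;
        client.create_subgraph(name).await?;
        Ok(())
    }
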
Features: - HTTP/HTTPS support with protocol validation - Optional access token authentication via Bearer header - User-Agent header with gnd version - 120 second timeout for long operations - create_subgraph and remove_subgraph methods Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 3 + gnd/Cargo.toml | 5 + gnd/src/lib.rs | 1 + gnd/src/services/graph_node.rs | 203 +++++++++++++++++++++++++++++++++ gnd/src/services/mod.rs | 3 + 5 files changed, 215 insertions(+) create mode 100644 gnd/src/services/graph_node.rs create mode 100644 gnd/src/services/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 0edb5d937d0..3aa8154a251 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3677,8 +3677,11 @@ dependencies = [ "notify", "pgtemp", "pq-sys", + "reqwest", + "serde", "serde_json", "tempfile", + "thiserror 2.0.17", "tokio", "tokio-util 0.7.18", "url", diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index e3941fd7404..9e2ead0f832 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -19,6 +19,7 @@ clap = { workspace = true } env_logger = "0.11.8" git-testament = "0.2" lazy_static = "1.5.0" +serde = { workspace = true } tokio = { workspace = true } tokio-util.workspace = true @@ -32,6 +33,10 @@ dirs = "5" serde_json = { workspace = true } pq-sys = { version = "0.7.5", features = ["bundled"] } +# HTTP client for Graph Node API +reqwest = { workspace = true } +thiserror = { workspace = true } + [target.'cfg(unix)'.dependencies] pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-args" } diff --git a/gnd/src/lib.rs b/gnd/src/lib.rs index 84de364e781..0f80d189547 100644 --- a/gnd/src/lib.rs +++ b/gnd/src/lib.rs @@ -1,2 +1,3 @@ pub mod commands; +pub mod services; pub mod watcher; diff --git a/gnd/src/services/graph_node.rs b/gnd/src/services/graph_node.rs new file mode 100644 index 00000000000..463588e457c --- /dev/null +++ b/gnd/src/services/graph_node.rs @@ -0,0 +1,203 @@ +//! JSON-RPC client for communicating with Graph Node admin API. +//! +//! This client is used by the `create`, `remove`, and `deploy` commands to +//! interact with a Graph Node instance. + +use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, USER_AGENT}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use url::Url; + +/// Version string for User-Agent header +const GND_VERSION: &str = env!("CARGO_PKG_VERSION"); + +/// Errors that can occur when communicating with a Graph Node +#[derive(Debug, Error)] +pub enum GraphNodeError { + #[error("Invalid node URL: {0}")] + InvalidUrl(#[from] url::ParseError), + + #[error("Unsupported protocol: {protocol}. The Graph Node URL must be http:// or https://")] + UnsupportedProtocol { protocol: String }, + + #[error("HTTP error: {0}")] + Http(#[from] reqwest::Error), + + #[error("JSON-RPC error: {message}")] + JsonRpc { code: i64, message: String }, +} + +/// A client for communicating with a Graph Node's JSON-RPC admin API +#[derive(Debug, Clone)] +pub struct GraphNodeClient { + client: reqwest::Client, + url: Url, +} + +impl GraphNodeClient { + /// Create a new client for the given Graph Node URL. + /// + /// The URL should be the admin JSON-RPC endpoint (e.g., `http://localhost:8020`). + /// An optional access token can be provided for authentication. 
+ pub fn new(node_url: &str, access_token: Option<&str>) -> Result { + let url = Url::parse(node_url)?; + + // Validate protocol + match url.scheme() { + "http" | "https" => {} + other => { + return Err(GraphNodeError::UnsupportedProtocol { + protocol: other.to_string(), + }) + } + } + + // Build headers + let mut headers = HeaderMap::new(); + headers.insert( + USER_AGENT, + HeaderValue::from_str(&format!("gnd/{}", GND_VERSION)) + .expect("valid user agent string"), + ); + + if let Some(token) = access_token { + headers.insert( + AUTHORIZATION, + HeaderValue::from_str(&format!("Bearer {}", token)) + .expect("valid authorization header"), + ); + } + + let client = reqwest::Client::builder() + .default_headers(headers) + .timeout(std::time::Duration::from_secs(120)) + .build()?; + + Ok(Self { client, url }) + } + + /// Create a subgraph with the given name. + /// + /// This registers the subgraph name with the Graph Node but does not deploy any code. + pub async fn create_subgraph(&self, name: &str) -> Result<(), GraphNodeError> { + let request = JsonRpcRequest { + jsonrpc: "2.0", + id: 1, + method: "subgraph_create", + params: SubgraphNameParams { name }, + }; + + self.call::(request).await?; + Ok(()) + } + + /// Remove a subgraph with the given name. + /// + /// This unregisters the subgraph name from the Graph Node. + pub async fn remove_subgraph(&self, name: &str) -> Result<(), GraphNodeError> { + let request = JsonRpcRequest { + jsonrpc: "2.0", + id: 1, + method: "subgraph_remove", + params: SubgraphNameParams { name }, + }; + + self.call::(request).await?; + Ok(()) + } + + /// Make a JSON-RPC call to the Graph Node + async fn call Deserialize<'de>>( + &self, + request: JsonRpcRequest<'_>, + ) -> Result { + let response = self + .client + .post(self.url.clone()) + .json(&request) + .send() + .await? 
+ .json::>() + .await?; + + if let Some(error) = response.error { + return Err(GraphNodeError::JsonRpc { + code: error.code, + message: error.message, + }); + } + + // If there's no error, there should be a result + response.result.ok_or_else(|| GraphNodeError::JsonRpc { + code: -1, + message: "No result in response".to_string(), + }) + } +} + +#[derive(Debug, Serialize)] +struct JsonRpcRequest<'a> { + jsonrpc: &'static str, + id: u32, + method: &'static str, + params: SubgraphNameParams<'a>, +} + +#[derive(Debug, Serialize)] +struct SubgraphNameParams<'a> { + name: &'a str, +} + +#[derive(Debug, Deserialize)] +struct JsonRpcResponse { + #[allow(dead_code)] + jsonrpc: String, + #[allow(dead_code)] + id: u32, + result: Option, + error: Option, +} + +#[derive(Debug, Deserialize)] +struct JsonRpcError { + code: i64, + message: String, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_invalid_url() { + let result = GraphNodeClient::new("not-a-valid-url", None); + assert!(result.is_err()); + } + + #[test] + fn test_unsupported_protocol() { + let result = GraphNodeClient::new("ftp://example.com", None); + assert!(matches!( + result, + Err(GraphNodeError::UnsupportedProtocol { protocol }) if protocol == "ftp" + )); + } + + #[test] + fn test_valid_http_url() { + let result = GraphNodeClient::new("http://localhost:8020", None); + assert!(result.is_ok()); + } + + #[test] + fn test_valid_https_url() { + let result = GraphNodeClient::new("https://example.com/admin", None); + assert!(result.is_ok()); + } + + #[test] + fn test_with_access_token() { + let result = GraphNodeClient::new("http://localhost:8020", Some("test-token")); + assert!(result.is_ok()); + } +} diff --git a/gnd/src/services/mod.rs b/gnd/src/services/mod.rs new file mode 100644 index 00000000000..afc1000aca2 --- /dev/null +++ b/gnd/src/services/mod.rs @@ -0,0 +1,3 @@ +mod graph_node; + +pub use graph_node::{GraphNodeClient, GraphNodeError}; From 015f3e45731363036d34e6381786a94130b7db39 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 18:51:24 -0800 Subject: [PATCH 10/60] gnd: Implement create and remove commands Add commands to register and unregister subgraph names with a Graph Node. 
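Concrete invocations, for illustration (the subgraph name, URL, and token are placeholders; see the Usage section below for the general form):

    gnd create user/my-subgraph --node http://localhost:8020
    gnd remove user/my-subgraph -g http://localhost:8020 --access-token <token>
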
Both commands: - Use GraphNodeClient for JSON-RPC communication - Support --node/-g flag for Graph Node URL (required) - Support --access-token flag for authentication - Automatically read deploy key from ~/.graph-cli.json if no token provided - Include unit tests for CLI argument parsing Usage: gnd create --node [--access-token ] gnd remove --node [--access-token ] Co-Authored-By: Claude Opus 4.5 --- gnd/src/commands/auth.rs | 19 +++++--- gnd/src/commands/create.rs | 96 ++++++++++++++++++++++++++++++++++++++ gnd/src/commands/mod.rs | 6 ++- gnd/src/commands/remove.rs | 96 ++++++++++++++++++++++++++++++++++++++ gnd/src/main.rs | 19 ++++---- 5 files changed, 217 insertions(+), 19 deletions(-) create mode 100644 gnd/src/commands/create.rs create mode 100644 gnd/src/commands/remove.rs diff --git a/gnd/src/commands/auth.rs b/gnd/src/commands/auth.rs index eaca1555b6c..f3cd633ad1c 100644 --- a/gnd/src/commands/auth.rs +++ b/gnd/src/commands/auth.rs @@ -95,17 +95,22 @@ pub fn run_auth(opt: AuthOpt) -> Result<()> { Ok(()) } +/// Get the deploy key for a node, if one is saved (uses default config path) +pub fn get_deploy_key(node: &str) -> Result> { + get_deploy_key_from(&config_path(), node) +} + +/// Get the deploy key for a node from a specific config file +fn get_deploy_key_from(config_file: &PathBuf, node: &str) -> Result> { + let normalized_node = normalize_node_url(node)?; + let config = load_config_from(config_file)?; + Ok(config.get(&normalized_node).cloned()) +} + #[cfg(test)] mod tests { use super::*; - /// Get the deploy key for a node from a specific config file - fn get_deploy_key_from(config_file: &PathBuf, node: &str) -> Result> { - let normalized_node = normalize_node_url(node)?; - let config = load_config_from(config_file)?; - Ok(config.get(&normalized_node).cloned()) - } - #[test] fn test_normalize_node_url() { assert_eq!( diff --git a/gnd/src/commands/create.rs b/gnd/src/commands/create.rs new file mode 100644 index 00000000000..5351a4eff21 --- /dev/null +++ b/gnd/src/commands/create.rs @@ -0,0 +1,96 @@ +use anyhow::{Context, Result}; +use clap::Parser; + +use crate::commands::auth::get_deploy_key; +use crate::services::GraphNodeClient; + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Register a subgraph name with a Graph Node")] +pub struct CreateOpt { + /// The subgraph name to register (e.g., "user/subgraph") + #[clap(value_name = "SUBGRAPH_NAME")] + pub subgraph_name: String, + + /// Graph Node admin URL + #[clap(long, short = 'g', value_name = "URL", help = "Graph Node URL")] + pub node: String, + + /// Access token for authentication + #[clap(long, value_name = "TOKEN", help = "Graph access token")] + pub access_token: Option, +} + +/// Run the create command +pub async fn run_create(opt: CreateOpt) -> Result<()> { + println!("Creating subgraph in Graph node: {}", opt.node); + + // Get access token (from flag or from config) + let access_token = match &opt.access_token { + Some(token) => Some(token.clone()), + None => get_deploy_key(&opt.node) + .ok() + .flatten() + .map(|key| key.to_string()), + }; + + let client = GraphNodeClient::new(&opt.node, access_token.as_deref()) + .context("Failed to create Graph Node client")?; + + client + .create_subgraph(&opt.subgraph_name) + .await + .context("Failed to create subgraph")?; + + println!("✔ Created subgraph: {}", opt.subgraph_name); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_opt_parsing() { + // Test that required args are enforced + let result = 
CreateOpt::try_parse_from(["create"]); + assert!(result.is_err()); + + // Test with just subgraph name (missing --node) + let result = CreateOpt::try_parse_from(["create", "user/subgraph"]); + assert!(result.is_err()); + + // Test with all required args + let result = CreateOpt::try_parse_from([ + "create", + "user/subgraph", + "--node", + "http://localhost:8020", + ]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.subgraph_name, "user/subgraph"); + assert_eq!(opt.node, "http://localhost:8020"); + assert!(opt.access_token.is_none()); + + // Test with access token + let result = CreateOpt::try_parse_from([ + "create", + "user/subgraph", + "--node", + "http://localhost:8020", + "--access-token", + "my-token", + ]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.access_token, Some("my-token".to_string())); + + // Test short flag for node + let result = + CreateOpt::try_parse_from(["create", "user/subgraph", "-g", "http://localhost:8020"]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.node, "http://localhost:8020"); + } +} diff --git a/gnd/src/commands/mod.rs b/gnd/src/commands/mod.rs index 2d433e0b348..ee9e94b1a08 100644 --- a/gnd/src/commands/mod.rs +++ b/gnd/src/commands/mod.rs @@ -1,7 +1,11 @@ mod auth; mod clean; +mod create; mod dev; +mod remove; -pub use auth::{run_auth, AuthOpt}; +pub use auth::{get_deploy_key, run_auth, AuthOpt}; pub use clean::{run_clean, CleanOpt}; +pub use create::{run_create, CreateOpt}; pub use dev::{run_dev, DevOpt}; +pub use remove::{run_remove, RemoveOpt}; diff --git a/gnd/src/commands/remove.rs b/gnd/src/commands/remove.rs new file mode 100644 index 00000000000..52cac738968 --- /dev/null +++ b/gnd/src/commands/remove.rs @@ -0,0 +1,96 @@ +use anyhow::{Context, Result}; +use clap::Parser; + +use crate::commands::auth::get_deploy_key; +use crate::services::GraphNodeClient; + +#[derive(Clone, Debug, Parser)] +#[clap(about = "Unregister a subgraph name from a Graph Node")] +pub struct RemoveOpt { + /// The subgraph name to unregister + #[clap(value_name = "SUBGRAPH_NAME")] + pub subgraph_name: String, + + /// Graph Node admin URL + #[clap(long, short = 'g', value_name = "URL", help = "Graph Node URL")] + pub node: String, + + /// Access token for authentication + #[clap(long, value_name = "TOKEN", help = "Graph access token")] + pub access_token: Option, +} + +/// Run the remove command +pub async fn run_remove(opt: RemoveOpt) -> Result<()> { + println!("Removing subgraph from Graph node: {}", opt.node); + + // Get access token (from flag or from config) + let access_token = match &opt.access_token { + Some(token) => Some(token.clone()), + None => get_deploy_key(&opt.node) + .ok() + .flatten() + .map(|key| key.to_string()), + }; + + let client = GraphNodeClient::new(&opt.node, access_token.as_deref()) + .context("Failed to create Graph Node client")?; + + client + .remove_subgraph(&opt.subgraph_name) + .await + .context("Failed to remove subgraph")?; + + println!("✔ Removed subgraph: {}", opt.subgraph_name); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_remove_opt_parsing() { + // Test that required args are enforced + let result = RemoveOpt::try_parse_from(["remove"]); + assert!(result.is_err()); + + // Test with just subgraph name (missing --node) + let result = RemoveOpt::try_parse_from(["remove", "user/subgraph"]); + assert!(result.is_err()); + + // Test with all required args + let result = RemoveOpt::try_parse_from([ + "remove", + 
"user/subgraph", + "--node", + "http://localhost:8020", + ]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.subgraph_name, "user/subgraph"); + assert_eq!(opt.node, "http://localhost:8020"); + assert!(opt.access_token.is_none()); + + // Test with access token + let result = RemoveOpt::try_parse_from([ + "remove", + "user/subgraph", + "--node", + "http://localhost:8020", + "--access-token", + "my-token", + ]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.access_token, Some("my-token".to_string())); + + // Test short flag for node + let result = + RemoveOpt::try_parse_from(["remove", "user/subgraph", "-g", "http://localhost:8020"]); + assert!(result.is_ok()); + let opt = result.unwrap(); + assert_eq!(opt.node, "http://localhost:8020"); + } +} diff --git a/gnd/src/main.rs b/gnd/src/main.rs index e2711478b25..71974c347db 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -5,7 +5,10 @@ use graph::{log::logger, slog::info}; use lazy_static::lazy_static; use tokio_util::sync::CancellationToken; -use gnd::commands::{run_auth, run_clean, run_dev, AuthOpt, CleanOpt, DevOpt}; +use gnd::commands::{ + run_auth, run_clean, run_create, run_dev, run_remove, AuthOpt, CleanOpt, CreateOpt, DevOpt, + RemoveOpt, +}; git_testament!(TESTAMENT); lazy_static! { @@ -59,10 +62,10 @@ enum Commands { Add, /// Register a subgraph name with a Graph Node - Create, + Create(CreateOpt), /// Unregister a subgraph name from a Graph Node - Remove, + Remove(RemoveOpt), /// Set the deploy key for a Graph Node Auth(AuthOpt), @@ -148,14 +151,8 @@ async fn main() -> Result<()> { info!(logger, "add command not yet implemented"); std::process::exit(1); } - Commands::Create => { - info!(logger, "create command not yet implemented"); - std::process::exit(1); - } - Commands::Remove => { - info!(logger, "remove command not yet implemented"); - std::process::exit(1); - } + Commands::Create(create_opt) => run_create(create_opt).await, + Commands::Remove(remove_opt) => run_remove(remove_opt).await, Commands::Auth(auth_opt) => run_auth(auth_opt), Commands::Publish => { info!(logger, "publish command not yet implemented"); From 7e8fc2caba9239fad97d1a275c731ab8deed1faf Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 19:04:47 -0800 Subject: [PATCH 11/60] gnd: Add output/spinner module for CLI progress indicators Add the output module with spinner functionality matching the TypeScript graph-cli output format (gluegun/ora style). This provides: - Spinner struct for progress indicators with colored output - with_spinner() helper for wrapping long-running operations - SpinnerResult for operations that can warn or fail - Checkmark/cross/warning symbols matching TS CLI output The module uses indicatif and console crates for terminal handling. 
Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 21 +++ gnd/Cargo.toml | 4 + gnd/src/lib.rs | 1 + gnd/src/output/mod.rs | 3 + gnd/src/output/spinner.rs | 299 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 328 insertions(+) create mode 100644 gnd/src/output/mod.rs create mode 100644 gnd/src/output/spinner.rs diff --git a/Cargo.lock b/Cargo.lock index 3aa8154a251..a0c2b81b2e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3666,6 +3666,7 @@ version = "0.36.0" dependencies = [ "anyhow", "clap", + "console", "dirs", "env_logger", "git-testament", @@ -3673,6 +3674,7 @@ dependencies = [ "graph", "graph-core", "graph-node", + "indicatif", "lazy_static", "notify", "pgtemp", @@ -4818,6 +4820,19 @@ dependencies = [ "serde_core", ] +[[package]] +name = "indicatif" +version = "0.17.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" +dependencies = [ + "console", + "number_prefix", + "portable-atomic", + "unicode-width", + "web-time", +] + [[package]] name = "indoc" version = "2.0.7" @@ -5666,6 +5681,12 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "number_prefix" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" + [[package]] name = "nybbles" version = "0.4.7" diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index 9e2ead0f832..ecc7be90f89 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -37,6 +37,10 @@ pq-sys = { version = "0.7.5", features = ["bundled"] } reqwest = { workspace = true } thiserror = { workspace = true } +# Console output +indicatif = "0.17" +console = "0.15" + [target.'cfg(unix)'.dependencies] pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-args" } diff --git a/gnd/src/lib.rs b/gnd/src/lib.rs index 0f80d189547..6f0c9265b6a 100644 --- a/gnd/src/lib.rs +++ b/gnd/src/lib.rs @@ -1,3 +1,4 @@ pub mod commands; +pub mod output; pub mod services; pub mod watcher; diff --git a/gnd/src/output/mod.rs b/gnd/src/output/mod.rs new file mode 100644 index 00000000000..6260de65a37 --- /dev/null +++ b/gnd/src/output/mod.rs @@ -0,0 +1,3 @@ +mod spinner; + +pub use spinner::{step, with_spinner, Spinner, SpinnerResult}; diff --git a/gnd/src/output/spinner.rs b/gnd/src/output/spinner.rs new file mode 100644 index 00000000000..01405fc9a16 --- /dev/null +++ b/gnd/src/output/spinner.rs @@ -0,0 +1,299 @@ +//! Spinner and progress output utilities. +//! +//! This module provides spinner functionality matching the output format of +//! the TypeScript graph-cli, which uses gluegun's print.spin() (based on ora). +//! +//! # Examples +//! +//! ```ignore +//! use gnd::output::{with_spinner, Spinner}; +//! +//! // Simple usage with closure +//! let result = with_spinner( +//! "Loading data", +//! "Failed to load data", +//! "Data loaded with warnings", +//! |spinner| { +//! // Do work... +//! Ok::<_, anyhow::Error>(42) +//! } +//! )?; +//! +//! // Or use Spinner directly for more control +//! let mut spinner = Spinner::new("Processing..."); +//! spinner.step("Step 1", Some("details")); +//! spinner.succeed("Done!"); +//! 
``` + +use std::fmt::Display; +use std::time::Duration; + +use console::{style, Term}; +use indicatif::{ProgressBar, ProgressStyle}; + +/// The checkmark symbol used for successful steps (matches TS CLI) +pub const SUCCESS_SYMBOL: &str = "✔"; +/// The cross symbol used for failed steps (matches TS CLI) +pub const FAILURE_SYMBOL: &str = "✖"; +/// The warning symbol used for warnings +pub const WARNING_SYMBOL: &str = "⚠"; + +/// A command-line spinner for showing progress on long-running operations. +/// +/// This matches the behavior of gluegun's `print.spin()` which is based on ora. +pub struct Spinner { + progress: ProgressBar, + term: Term, +} + +impl Spinner { + /// Create a new spinner with the given initial message. + pub fn new(message: impl Into) -> Self { + let progress = ProgressBar::new_spinner(); + progress.set_style( + ProgressStyle::default_spinner() + .tick_chars("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") + .template("{spinner:.cyan} {msg}") + .expect("Invalid spinner template"), + ); + progress.set_message(message.into()); + progress.enable_steady_tick(Duration::from_millis(80)); + + Self { + progress, + term: Term::stderr(), + } + } + + /// Update the spinner message. + pub fn set_message(&self, message: impl Into) { + self.progress.set_message(message.into()); + } + + /// Stop the spinner and show a step with optional details. + /// + /// The spinner will continue after showing the step (matches TS CLI behavior). + pub fn step(&self, subject: impl Display, text: Option) { + self.progress.suspend(|| { + let msg = if let Some(text) = text { + format!("{} {}", style(subject).dim(), style(text).dim()) + } else { + format!("{}", style(subject).dim()) + }; + let _ = self.term.write_line(&msg); + }); + } + + /// Stop the spinner with a success checkmark and message. + pub fn succeed(self, message: impl Display) { + self.progress.finish_and_clear(); + let _ = self + .term + .write_line(&format!("{} {}", style(SUCCESS_SYMBOL).green(), message)); + } + + /// Stop the spinner with a failure X and message. + pub fn fail(self, message: impl Display) { + self.progress.finish_and_clear(); + let _ = self + .term + .write_line(&format!("{} {}", style(FAILURE_SYMBOL).red(), message)); + } + + /// Stop the spinner with a warning symbol and message. + pub fn warn(self, message: impl Display) { + self.progress.finish_and_clear(); + let _ = self + .term + .write_line(&format!("{} {}", style(WARNING_SYMBOL).yellow(), message)); + } + + /// Stop the spinner, clearing the line without any message. + pub fn stop(self) { + self.progress.finish_and_clear(); + } + + /// Stop the spinner and persist with a custom symbol and text. + pub fn stop_and_persist(self, symbol: impl Display, message: impl Display) { + self.progress.finish_and_clear(); + let _ = self.term.write_line(&format!("{} {}", symbol, message)); + } +} + +/// Result type for spinner operations that can have warnings. 
+#[derive(Debug)] +pub enum SpinnerResult { + /// Operation succeeded without warnings + Ok(T), + /// Operation succeeded with a warning + Warning { result: T, warning: String }, + /// Operation failed with an error message + Error { result: Option, error: String }, +} + +impl SpinnerResult { + /// Create a successful result + pub fn ok(result: T) -> Self { + SpinnerResult::Ok(result) + } + + /// Create a result with a warning + pub fn warning(result: T, warning: impl Into) -> Self { + SpinnerResult::Warning { + result, + warning: warning.into(), + } + } + + /// Create an error result + pub fn error(error: impl Into) -> Self { + SpinnerResult::Error { + result: None, + error: error.into(), + } + } + + /// Create an error result with a partial result + pub fn error_with_result(result: T, error: impl Into) -> Self { + SpinnerResult::Error { + result: Some(result), + error: error.into(), + } + } +} + +/// Execute a function with a spinner, showing progress and handling errors. +/// +/// This matches the TS CLI's `withSpinner` function behavior: +/// - Shows `text` while the operation is in progress +/// - On success, shows `text` with a checkmark +/// - On error, shows `error_text: ` with an X +/// - If the function returns `SpinnerResult::Warning`, shows the warning but still succeeds +/// +/// # Arguments +/// +/// * `text` - The text to show while the operation is in progress (and on success) +/// * `error_text` - The prefix for error messages +/// * `warning_text` - The prefix for warning messages +/// * `f` - The function to execute +/// +/// # Returns +/// +/// The result of the function, or an error if the function failed. +/// +/// # Examples +/// +/// ```ignore +/// let result = with_spinner( +/// "Uploading files", +/// "Failed to upload", +/// "Uploaded with warnings", +/// |spinner| { +/// spinner.step("Preparing", Some("files")); +/// // Do work... +/// Ok::<_, anyhow::Error>(42) +/// } +/// )?; +/// ``` +pub fn with_spinner( + text: impl Into, + error_text: impl Into, + _warning_text: impl Into, + f: impl FnOnce(&Spinner) -> Result, +) -> Result +where + E: Display, +{ + let text = text.into(); + let error_text = error_text.into(); + + let spinner = Spinner::new(&text); + + match f(&spinner) { + Ok(result) => { + spinner.succeed(&text); + Ok(result) + } + Err(e) => { + spinner.fail(format!("{}: {}", error_text, e)); + Err(e) + } + } +} + +/// Execute a step as part of a spinner, showing the step description. +/// +/// This matches the TS CLI's `step` function which shows a muted step +/// description while continuing the spinner. 
+pub fn step(spinner: &Spinner, subject: impl Display, text: Option) { + spinner.step(subject, text); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_spinner_result_ok() { + let result: SpinnerResult = SpinnerResult::ok(42); + match result { + SpinnerResult::Ok(v) => assert_eq!(v, 42), + _ => panic!("Expected Ok"), + } + } + + #[test] + fn test_spinner_result_warning() { + let result: SpinnerResult = SpinnerResult::warning(42, "some warning"); + match result { + SpinnerResult::Warning { result, warning } => { + assert_eq!(result, 42); + assert_eq!(warning, "some warning"); + } + _ => panic!("Expected Warning"), + } + } + + #[test] + fn test_spinner_result_error() { + let result: SpinnerResult = SpinnerResult::error("some error"); + match result { + SpinnerResult::Error { result, error } => { + assert!(result.is_none()); + assert_eq!(error, "some error"); + } + _ => panic!("Expected Error"), + } + } + + #[test] + fn test_spinner_result_error_with_result() { + let result: SpinnerResult = SpinnerResult::error_with_result(42, "partial error"); + match result { + SpinnerResult::Error { result, error } => { + assert_eq!(result, Some(42)); + assert_eq!(error, "partial error"); + } + _ => panic!("Expected Error"), + } + } + + #[test] + fn test_with_spinner_success() { + let result: Result = + with_spinner("Testing", "Test failed", "Test warning", |_spinner| { + Ok::<_, &str>(42) + }); + assert_eq!(result.unwrap(), 42); + } + + #[test] + fn test_with_spinner_error() { + let result: Result = + with_spinner("Testing", "Test failed", "Test warning", |_spinner| { + Err::("error occurred") + }); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), "error occurred"); + } +} From 95c94b36189e55b6dcceaa22183715db76d93355 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 19:12:17 -0800 Subject: [PATCH 12/60] gnd: Add codegen module for AssemblyScript type generation This commit adds the foundation for code generation functionality: - codegen/typescript.rs: AST builders for generating TypeScript/AssemblyScript code (classes, methods, types, imports) - codegen/types.rs: Type conversion utilities between GraphQL, Ethereum ABI, and AssemblyScript types - codegen/schema.rs: SchemaCodeGenerator that generates entity classes from GraphQL schemas, matching the TS CLI output format The schema code generator supports: - Entity classes with constructor, save(), load(), loadInBlock() - Field getters and setters with proper type conversions - Nullable field handling with proper null checks - Entity reference fields (stored as string IDs) - Derived fields with loader classes - Multiple ID field types (String, Bytes, Int8) Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 2 + gnd/Cargo.toml | 4 + gnd/src/codegen/mod.rs | 16 + gnd/src/codegen/schema.rs | 684 ++++++++++++++++++++++++++++++++++ gnd/src/codegen/types.rs | 128 +++++++ gnd/src/codegen/typescript.rs | 477 ++++++++++++++++++++++++ gnd/src/lib.rs | 1 + 7 files changed, 1312 insertions(+) create mode 100644 gnd/src/codegen/mod.rs create mode 100644 gnd/src/codegen/schema.rs create mode 100644 gnd/src/codegen/types.rs create mode 100644 gnd/src/codegen/typescript.rs diff --git a/Cargo.lock b/Cargo.lock index a0c2b81b2e9..a4f9490cc4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3674,11 +3674,13 @@ dependencies = [ "graph", "graph-core", "graph-node", + "graphql-parser", "indicatif", "lazy_static", "notify", "pgtemp", "pq-sys", + "regex", "reqwest", "serde", "serde_json", diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml 
index ecc7be90f89..bfa07bae24f 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -41,6 +41,10 @@ thiserror = { workspace = true } indicatif = "0.17" console = "0.15" +# Code generation +graphql-parser = "0.4" +regex = "1" + [target.'cfg(unix)'.dependencies] pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-args" } diff --git a/gnd/src/codegen/mod.rs b/gnd/src/codegen/mod.rs new file mode 100644 index 00000000000..a014e83304e --- /dev/null +++ b/gnd/src/codegen/mod.rs @@ -0,0 +1,16 @@ +//! Code generation for subgraph AssemblyScript types. +//! +//! This module generates AssemblyScript types from: +//! - GraphQL schema (entity classes) +//! - Contract ABIs (event and call bindings) +//! - Data source templates + +mod schema; +mod types; +mod typescript; + +pub use schema::SchemaCodeGenerator; +pub use typescript::{ + ArrayType, Class, ClassMember, Method, ModuleImports, NamedType, NullableType, Param, + StaticMethod, GENERATED_FILE_NOTE, +}; diff --git a/gnd/src/codegen/schema.rs b/gnd/src/codegen/schema.rs new file mode 100644 index 00000000000..f12cda460ff --- /dev/null +++ b/gnd/src/codegen/schema.rs @@ -0,0 +1,684 @@ +//! Schema code generation. +//! +//! Generates AssemblyScript entity classes from GraphQL schemas. + +use graphql_parser::schema::{Definition, Document, Field, ObjectType, Type, TypeDefinition}; + +use super::types::{asc_type_for_value, value_from_asc, value_to_asc}; +use super::typescript::{ + self as ts, ArrayType, Class, Method, ModuleImports, NamedType, NullableType, Param, + StaticMethod, TypeExpr, +}; + +/// Reserved words in AssemblyScript that need to be escaped. +const RESERVED_WORDS: &[&str] = &[ + "break", + "case", + "catch", + "class", + "const", + "continue", + "debugger", + "default", + "delete", + "do", + "else", + "enum", + "export", + "extends", + "false", + "finally", + "for", + "function", + "if", + "implements", + "import", + "in", + "instanceof", + "interface", + "let", + "new", + "null", + "package", + "private", + "protected", + "public", + "return", + "static", + "super", + "switch", + "this", + "throw", + "true", + "try", + "typeof", + "var", + "void", + "while", + "with", + "yield", +]; + +/// Handle reserved words by appending an underscore. +fn handle_reserved_word(name: &str) -> String { + if RESERVED_WORDS.contains(&name) { + format!("{}_", name) + } else { + name.to_string() + } +} + +/// Type of the ID field. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum IdFieldKind { + String, + Bytes, + Int8, +} + +impl IdFieldKind { + /// Get the AssemblyScript type name for this ID type. + pub fn type_name(&self) -> &'static str { + match self { + IdFieldKind::String => "string", + IdFieldKind::Bytes => "Bytes", + IdFieldKind::Int8 => "i64", + } + } + + /// Get the GraphQL type name for this ID type. + pub fn gql_type_name(&self) -> &'static str { + match self { + IdFieldKind::String => "String", + IdFieldKind::Bytes => "Bytes", + IdFieldKind::Int8 => "Int8", + } + } + + /// Get the code to create a Value from the ID. + pub fn value_from(&self) -> &'static str { + match self { + IdFieldKind::String => "Value.fromString(id)", + IdFieldKind::Bytes => "Value.fromBytes(id)", + IdFieldKind::Int8 => "Value.fromI64(id)", + } + } + + /// Get the ValueKind for this ID type. 
+ pub fn value_kind(&self) -> &'static str { + match self { + IdFieldKind::String => "ValueKind.STRING", + IdFieldKind::Bytes => "ValueKind.BYTES", + IdFieldKind::Int8 => "ValueKind.INT8", + } + } + + /// Get the code to convert a Value to a string representation. + pub fn value_to_string(&self) -> &'static str { + match self { + IdFieldKind::String => "id.toString()", + IdFieldKind::Bytes => "id.toBytes().toHexString()", + IdFieldKind::Int8 => "id.toI64().toString()", + } + } + + /// Get the code to convert the ID to a string. + pub fn id_to_string_code(self) -> &'static str { + match self { + IdFieldKind::String => "id", + IdFieldKind::Bytes => "id.toHexString()", + IdFieldKind::Int8 => "id.toString()", + } + } + + /// Determine the ID field kind from a type name. + pub fn from_type_name(type_name: &str) -> Self { + match type_name { + "Bytes" => IdFieldKind::Bytes, + "Int8" => IdFieldKind::Int8, + _ => IdFieldKind::String, + } + } +} + +/// Get the base type name from a GraphQL type (stripping NonNull and List wrappers). +fn get_base_type_name(ty: &Type<'_, String>) -> String { + match ty { + Type::NamedType(name) => name.clone(), + Type::NonNullType(inner) => get_base_type_name(inner), + Type::ListType(inner) => get_base_type_name(inner), + } +} + +/// Check if a type is nullable (not wrapped in NonNull). +fn is_nullable(ty: &Type<'_, String>) -> bool { + !matches!(ty, Type::NonNullType(_)) +} + +/// Check if a type is a list. +fn is_list(ty: &Type<'_, String>) -> bool { + match ty { + Type::ListType(_) => true, + Type::NonNullType(inner) => is_list(inner), + Type::NamedType(_) => false, + } +} + +/// Check if a field has the @derivedFrom directive. +fn is_derived_field(field: &Field<'_, String>) -> bool { + field.directives.iter().any(|d| d.name == "derivedFrom") +} + +/// Check if an object type has the @entity directive. +fn is_entity_type(obj: &ObjectType<'_, String>) -> bool { + obj.directives.iter().any(|d| d.name == "entity") +} + +/// Collected entity info for code generation. +struct EntityInfo { + name: String, + id_kind: IdFieldKind, + fields: Vec, +} + +/// Collected field info. +struct FieldInfo { + name: String, + is_derived: bool, + base_type: String, + is_nullable: bool, + is_list: bool, +} + +/// Schema code generator. +pub struct SchemaCodeGenerator { + entities: Vec, + entity_names: std::collections::HashSet, +} + +impl SchemaCodeGenerator { + /// Create a new schema code generator from a parsed GraphQL document. 
+ pub fn new(document: &Document<'_, String>) -> Self { + let mut entities = Vec::new(); + let mut entity_names = std::collections::HashSet::new(); + + // First pass: collect entity names + for def in &document.definitions { + if let Definition::TypeDefinition(TypeDefinition::Object(obj)) = def { + if is_entity_type(obj) { + entity_names.insert(obj.name.clone()); + } + } + } + + // Second pass: collect entity info + for def in &document.definitions { + if let Definition::TypeDefinition(TypeDefinition::Object(obj)) = def { + if is_entity_type(obj) { + let name = obj.name.clone(); + + // Find ID field + let id_field = obj.fields.iter().find(|f| f.name == "id"); + let id_kind = id_field + .map(|f| IdFieldKind::from_type_name(&get_base_type_name(&f.field_type))) + .unwrap_or(IdFieldKind::String); + + // Collect field info + let fields: Vec<_> = obj + .fields + .iter() + .map(|f| FieldInfo { + name: f.name.clone(), + is_derived: is_derived_field(f), + base_type: get_base_type_name(&f.field_type), + is_nullable: is_nullable(&f.field_type), + is_list: is_list(&f.field_type), + }) + .collect(); + + entities.push(EntityInfo { + name, + id_kind, + fields, + }); + } + } + } + + Self { + entities, + entity_names, + } + } + + /// Generate module imports for the schema file. + pub fn generate_module_imports(&self) -> Vec { + vec![ModuleImports::new( + vec![ + "TypedMap".to_string(), + "Entity".to_string(), + "Value".to_string(), + "ValueKind".to_string(), + "store".to_string(), + "Bytes".to_string(), + "BigInt".to_string(), + "BigDecimal".to_string(), + "Int8".to_string(), + ], + "@graphprotocol/graph-ts", + )] + } + + /// Generate entity classes from the schema. + pub fn generate_types(&self, generate_store_methods: bool) -> Vec { + self.entities + .iter() + .map(|entity| self.generate_entity_type(entity, generate_store_methods)) + .collect() + } + + /// Generate derived loaders for fields with @derivedFrom. 
+ pub fn generate_derived_loaders(&self) -> Vec { + let mut loaders = Vec::new(); + let mut seen_types = std::collections::HashSet::new(); + + for entity in &self.entities { + for field in &entity.fields { + if field.is_derived && !seen_types.contains(&field.base_type) { + // Only generate loaders for entity types, not interfaces + if self.entity_names.contains(&field.base_type) { + seen_types.insert(field.base_type.clone()); + loaders.push(self.generate_derived_loader(&field.base_type)); + } + } + } + } + + loaders + } + + fn generate_entity_type(&self, entity: &EntityInfo, generate_store_methods: bool) -> Class { + let mut klass = ts::klass(&entity.name).exported().extends("Entity"); + + // Generate constructor + klass.add_method(self.generate_constructor(&entity.id_kind)); + + // Generate store methods + if generate_store_methods { + for method in self.generate_store_methods(&entity.name, &entity.id_kind) { + match method { + StoreMethod::Regular(m) => klass.add_method(m), + StoreMethod::Static(m) => klass.add_static_method(m), + } + } + } + + // Generate field getters and setters + for field in &entity.fields { + if let Some(getter) = self.generate_field_getter(&entity.name, field) { + klass.add_method(getter); + } + if let Some(setter) = self.generate_field_setter(field) { + klass.add_method(setter); + } + } + + klass + } + + fn generate_constructor(&self, id_kind: &IdFieldKind) -> Method { + Method::new( + "constructor", + vec![Param::new("id", NamedType::new(id_kind.type_name()))], + None, + format!( + r#" + super() + this.set('id', {})"#, + id_kind.value_from() + ), + ) + } + + fn generate_store_methods(&self, entity_name: &str, id_kind: &IdFieldKind) -> Vec { + vec![ + // save() method + StoreMethod::Regular(Method::new( + "save", + vec![], + Some(NamedType::new("void").into()), + format!( + r#" + let id = this.get('id') + assert(id != null, + 'Cannot save {} entity without an ID') + if (id) {{ + assert(id.kind == {}, + `Entities of type {} must have an ID of type {} but the id '${{id.displayData()}}' is of type ${{id.displayKind()}}`) + store.set('{}', {}, this) + }}"#, + entity_name, + id_kind.value_kind(), + entity_name, + id_kind.gql_type_name(), + entity_name, + id_kind.value_to_string() + ), + )), + // loadInBlock() static method + StoreMethod::Static(StaticMethod::new( + "loadInBlock", + vec![Param::new("id", NamedType::new(id_kind.type_name()))], + NullableType::new(NamedType::new(entity_name)), + format!( + r#" + return changetype<{} | null>(store.get_in_block('{}', {}))"#, + entity_name, + entity_name, + id_kind.id_to_string_code() + ), + )), + // load() static method + StoreMethod::Static(StaticMethod::new( + "load", + vec![Param::new("id", NamedType::new(id_kind.type_name()))], + NullableType::new(NamedType::new(entity_name)), + format!( + r#" + return changetype<{} | null>(store.get('{}', {}))"#, + entity_name, + entity_name, + id_kind.id_to_string_code() + ), + )), + ] + } + + fn generate_field_getter(&self, entity_name: &str, field: &FieldInfo) -> Option { + let safe_name = handle_reserved_word(&field.name); + + // Handle derived fields + if field.is_derived { + return self.generate_derived_field_getter(entity_name, field, &safe_name); + } + + let value_type = self.value_type_from_field(field); + let return_type = self.type_from_field(field); + let nullable = field.is_nullable; + + let primitive_default = match &return_type { + TypeExpr::Named(t) => t.get_primitive_default(), + _ => None, + }; + + let get_code = if nullable { + format!( + r#" + let value = 
this.get('{}') + if (!value || value.kind == ValueKind.NULL) {{ + return null + }} else {{ + return {} + }}"#, + field.name, + value_to_asc("value", &value_type) + ) + } else { + let null_handling = match primitive_default { + Some(default) => format!("return {}", default), + None => "throw new Error('Cannot return null for a required field.')".to_string(), + }; + format!( + r#" + let value = this.get('{}') + if (!value || value.kind == ValueKind.NULL) {{ + {} + }} else {{ + return {} + }}"#, + field.name, + null_handling, + value_to_asc("value", &value_type) + ) + }; + + Some(Method::new( + format!("get {}", safe_name), + vec![], + Some(return_type), + get_code, + )) + } + + fn generate_derived_field_getter( + &self, + entity_name: &str, + field: &FieldInfo, + safe_name: &str, + ) -> Option { + let loader_name = format!("{}Loader", field.base_type); + + Some(Method::new( + format!("get {}", safe_name), + vec![], + Some(NamedType::new(&loader_name).into()), + format!( + r#" + return new {}('{}', this.get('id')!.toString(), '{}')"#, + loader_name, entity_name, field.name + ), + )) + } + + fn generate_field_setter(&self, field: &FieldInfo) -> Option { + // No setters for derived fields + if field.is_derived { + return None; + } + + let safe_name = handle_reserved_word(&field.name); + let value_type = self.value_type_from_field(field); + let param_type = self.type_from_field(field); + let nullable = field.is_nullable; + + let set_code = if nullable { + let inner_type = match ¶m_type { + TypeExpr::Nullable(n) => n.inner.to_string(), + other => other.to_string(), + }; + format!( + r#" + if (!value) {{ + this.unset('{}') + }} else {{ + this.set('{}', {}) + }}"#, + field.name, + field.name, + value_from_asc(&format!("<{}>value", inner_type), &value_type) + ) + } else { + format!( + r#" + this.set('{}', {})"#, + field.name, + value_from_asc("value", &value_type) + ) + }; + + Some(Method::new( + format!("set {}", safe_name), + vec![Param::new("value", param_type)], + None, + set_code, + )) + } + + fn generate_derived_loader(&self, type_name: &str) -> Class { + let loader_name = format!("{}Loader", type_name); + let mut klass = ts::klass(&loader_name).exported().extends("Entity"); + + // Add members + klass.add_member(ts::klass_member("_entity", "string")); + klass.add_member(ts::klass_member("_field", "string")); + klass.add_member(ts::klass_member("_id", "string")); + + // Add constructor + klass.add_method(Method::new( + "constructor", + vec![ + Param::new("entity", NamedType::new("string")), + Param::new("id", NamedType::new("string")), + Param::new("field", NamedType::new("string")), + ], + None, + r#" + super(); + this._entity = entity; + this._id = id; + this._field = field;"# + .to_string(), + )); + + // Add load() method + klass.add_method(Method::new( + "load", + vec![], + Some(TypeExpr::Raw(format!("{}[]", type_name))), + format!( + r#" + let value = store.loadRelated(this._entity, this._id, this._field); + return changetype<{}[]>(value);"#, + type_name + ), + )); + + klass + } + + /// Get the value type string for a field. + fn value_type_from_field(&self, field: &FieldInfo) -> String { + if field.is_list { + format!( + "[{}]", + if self.entity_names.contains(&field.base_type) { + "String".to_string() + } else { + field.base_type.clone() + } + ) + } else if self.entity_names.contains(&field.base_type) { + "String".to_string() + } else { + field.base_type.clone() + } + } + + /// Convert field info to an AssemblyScript TypeExpr. 
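+    ///
+    /// Roughly (entity references are stored as string IDs, and AssemblyScript
+    /// primitives cannot be null):
+    ///
+    /// ```text
+    /// BigInt!   -> BigInt
+    /// String    -> string | null
+    /// [Token!]! -> Array<string>   (Token being an @entity type)
+    /// Int       -> i32
+    /// ```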
+ fn type_from_field(&self, field: &FieldInfo) -> TypeExpr { + let type_name = if self.entity_names.contains(&field.base_type) { + "string" // Entity references are stored as string IDs + } else { + asc_type_for_value(&field.base_type) + }; + + let named = NamedType::new(type_name); + + if field.is_list { + let array = ArrayType::new(named); + if field.is_nullable { + NullableType::new(array).into() + } else { + array.into() + } + } else if field.is_nullable && !named.is_primitive() { + NullableType::new(named).into() + } else { + named.into() + } + } +} + +enum StoreMethod { + Regular(Method), + Static(StaticMethod), +} + +#[cfg(test)] +mod tests { + use super::*; + use graphql_parser::parse_schema; + + #[test] + fn test_simple_entity() { + let schema = r#" + type Transfer @entity { + id: ID! + from: Bytes! + to: Bytes! + value: BigInt! + } + "#; + let doc = parse_schema::(schema).unwrap(); + let gen = SchemaCodeGenerator::new(&doc); + + let classes = gen.generate_types(true); + assert_eq!(classes.len(), 1); + + let transfer = &classes[0]; + assert_eq!(transfer.name, "Transfer"); + assert_eq!(transfer.extends, Some("Entity".to_string())); + assert!(transfer.export); + } + + #[test] + fn test_nullable_field() { + let schema = r#" + type Token @entity { + id: ID! + name: String + symbol: String! + } + "#; + let doc = parse_schema::(schema).unwrap(); + let gen = SchemaCodeGenerator::new(&doc); + + let classes = gen.generate_types(true); + assert_eq!(classes.len(), 1); + + // Check that we have methods for nullable and non-nullable fields + let token = &classes[0]; + let method_names: Vec<_> = token.methods.iter().map(|m| m.name.as_str()).collect(); + assert!(method_names.contains(&"get name")); + assert!(method_names.contains(&"set name")); + assert!(method_names.contains(&"get symbol")); + assert!(method_names.contains(&"set symbol")); + } + + #[test] + fn test_id_field_types() { + assert_eq!(IdFieldKind::String.type_name(), "string"); + assert_eq!(IdFieldKind::Bytes.type_name(), "Bytes"); + assert_eq!(IdFieldKind::Int8.type_name(), "i64"); + } + + #[test] + fn test_entity_reference() { + let schema = r#" + type User @entity { + id: ID! + name: String! + } + type Post @entity { + id: ID! + author: User! + } + "#; + let doc = parse_schema::(schema).unwrap(); + let gen = SchemaCodeGenerator::new(&doc); + + // The Post.author field should be treated as a string (entity ID reference) + assert!(gen.entity_names.contains("User")); + assert!(gen.entity_names.contains("Post")); + } +} diff --git a/gnd/src/codegen/types.rs b/gnd/src/codegen/types.rs new file mode 100644 index 00000000000..086e3ee5529 --- /dev/null +++ b/gnd/src/codegen/types.rs @@ -0,0 +1,128 @@ +//! Type conversion utilities for code generation. +//! +//! This module handles conversions between: +//! - GraphQL schema types and AssemblyScript types +//! - Ethereum ABI types and AssemblyScript types +//! - graph-node Value types and AssemblyScript types + +/// Get the AssemblyScript type for a GraphQL Value type. 
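+///
+/// For example:
+///
+/// ```ignore
+/// assert_eq!(asc_type_for_value("BigInt"), "BigInt");
+/// assert_eq!(asc_type_for_value("Int"), "i32");
+/// assert_eq!(asc_type_for_value("[String]"), "Array<string>");
+/// ```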
+pub fn asc_type_for_value(value_type: &str) -> &'static str {
+    match value_type {
+        "Bytes" => "Bytes",
+        "Boolean" => "boolean",
+        "Int" => "i32",
+        "Int8" => "i64",
+        "BigInt" => "BigInt",
+        "ID" | "String" => "string",
+        "BigDecimal" => "BigDecimal",
+        "Timestamp" => "i64",
+        // Array types
+        "[Bytes]" => "Array<Bytes>",
+        "[Boolean]" => "Array<boolean>",
+        "[Int]" => "Array<i32>",
+        "[Int8]" => "Array<i64>",
+        "[Timestamp]" => "Array<i64>",
+        "[BigInt]" => "Array<BigInt>",
+        "[ID]" | "[String]" => "Array<string>",
+        "[BigDecimal]" => "Array<BigDecimal>",
+        // Default for entity references and unknown types
+        _ => {
+            if value_type.starts_with('[') && value_type.ends_with(']') {
+                "Array<string>" // Entity reference arrays
+            } else {
+                "string" // Entity references
+            }
+        }
+    }
+}
+
+/// Generate code to convert a Value to AssemblyScript.
+pub fn value_to_asc(code: &str, value_type: &str) -> String {
+    match value_type {
+        "Bytes" => format!("{}.toBytes()", code),
+        "Boolean" => format!("{}.toBoolean()", code),
+        "Int" => format!("{}.toI32()", code),
+        "Int8" => format!("{}.toI64()", code),
+        "BigInt" => format!("{}.toBigInt()", code),
+        "ID" | "String" => format!("{}.toString()", code),
+        "BigDecimal" => format!("{}.toBigDecimal()", code),
+        "Timestamp" => format!("{}.toTimestamp()", code),
+        // Array types
+        "[Bytes]" => format!("{}.toBytesArray()", code),
+        "[Boolean]" => format!("{}.toBooleanArray()", code),
+        "[Int]" => format!("{}.toI32Array()", code),
+        "[Int8]" => format!("{}.toI64Array()", code),
+        "[Timestamp]" => format!("{}.toTimestampArray()", code),
+        "[BigInt]" => format!("{}.toBigIntArray()", code),
+        "[ID]" | "[String]" => format!("{}.toStringArray()", code),
+        "[BigDecimal]" => format!("{}.toBigDecimalArray()", code),
+        // Default for entity references
+        _ => {
+            if value_type.starts_with('[') {
+                format!("{}.toStringArray()", code)
+            } else {
+                format!("{}.toString()", code)
+            }
+        }
+    }
+}
+
+/// Generate code to convert an AssemblyScript value to a Value.
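+///
+/// For example:
+///
+/// ```ignore
+/// assert_eq!(value_from_asc("value", "BigInt"), "Value.fromBigInt(value)");
+/// assert_eq!(value_from_asc("value", "[Int]"), "Value.fromI32Array(value)");
+/// ```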
+pub fn value_from_asc(code: &str, value_type: &str) -> String {
+    match value_type {
+        "Bytes" => format!("Value.fromBytes({})", code),
+        "Boolean" => format!("Value.fromBoolean({})", code),
+        "Int" => format!("Value.fromI32({})", code),
+        "Int8" => format!("Value.fromI64({})", code),
+        "BigInt" => format!("Value.fromBigInt({})", code),
+        "ID" | "String" => format!("Value.fromString({})", code),
+        "BigDecimal" => format!("Value.fromBigDecimal({})", code),
+        "Timestamp" => format!("Value.fromTimestamp({})", code),
+        // Array types
+        "[Bytes]" => format!("Value.fromBytesArray({})", code),
+        "[Boolean]" => format!("Value.fromBooleanArray({})", code),
+        "[Int]" => format!("Value.fromI32Array({})", code),
+        "[Int8]" => format!("Value.fromI64Array({})", code),
+        "[Timestamp]" => format!("Value.fromTimestampArray({})", code),
+        "[BigInt]" => format!("Value.fromBigIntArray({})", code),
+        "[ID]" | "[String]" => format!("Value.fromStringArray({})", code),
+        "[BigDecimal]" => format!("Value.fromBigDecimalArray({})", code),
+        // Default for entity references
+        _ => {
+            if value_type.starts_with('[') {
+                format!("Value.fromStringArray({})", code)
+            } else {
+                format!("Value.fromString({})", code)
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_asc_type_for_value() {
+        assert_eq!(asc_type_for_value("String"), "string");
+        assert_eq!(asc_type_for_value("ID"), "string");
+        assert_eq!(asc_type_for_value("Int"), "i32");
+        assert_eq!(asc_type_for_value("BigInt"), "BigInt");
+        assert_eq!(asc_type_for_value("Bytes"), "Bytes");
+        assert_eq!(asc_type_for_value("[String]"), "Array<string>");
+        assert_eq!(asc_type_for_value("SomeEntity"), "string"); // Entity reference
+    }
+
+    #[test]
+    fn test_value_to_asc() {
+        assert_eq!(value_to_asc("value", "String"), "value.toString()");
+        assert_eq!(value_to_asc("value", "BigInt"), "value.toBigInt()");
+        assert_eq!(value_to_asc("value", "[Int]"), "value.toI32Array()");
+    }
+
+    #[test]
+    fn test_value_from_asc() {
+        assert_eq!(value_from_asc("value", "String"), "Value.fromString(value)");
+        assert_eq!(value_from_asc("value", "BigInt"), "Value.fromBigInt(value)");
+    }
+}
diff --git a/gnd/src/codegen/typescript.rs b/gnd/src/codegen/typescript.rs
new file mode 100644
index 00000000000..4db3aa2c642
--- /dev/null
+++ b/gnd/src/codegen/typescript.rs
@@ -0,0 +1,477 @@
+//! TypeScript/AssemblyScript code generation utilities.
+//!
+//! This module provides builder types for generating TypeScript/AssemblyScript code,
+//! matching the output format of the TS CLI's `codegen/typescript.ts`.
+
+use std::fmt::{self, Display};
+
+/// Note prepended to all generated files.
+pub const GENERATED_FILE_NOTE: &str = r#"
+// THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+"#;
+
+/// A function/method parameter.
+#[derive(Debug, Clone)]
+pub struct Param {
+    pub name: String,
+    pub param_type: Box<TypeExpr>,
+}
+
+impl Param {
+    pub fn new(name: impl Into<String>, param_type: impl Into<TypeExpr>) -> Self {
+        Self {
+            name: name.into(),
+            param_type: Box::new(param_type.into()),
+        }
+    }
+}
+
+impl Display for Param {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}: {}", self.name, self.param_type)
+    }
+}
+
+/// A type expression (named type, array type, or nullable type).
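+///
+/// The `Display` impls render AssemblyScript syntax, e.g. (a sketch):
+///
+/// ```ignore
+/// let ty: TypeExpr = NullableType::new(ArrayType::new(NamedType::new("BigInt"))).into();
+/// assert_eq!(ty.to_string(), "Array<BigInt> | null");
+/// ```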
+#[derive(Debug, Clone)] +pub enum TypeExpr { + Named(NamedType), + Array(ArrayType), + Nullable(NullableType), + /// Raw string type (for complex types like `ethereum.CallResult`) + Raw(String), +} + +impl From for TypeExpr { + fn from(t: NamedType) -> Self { + TypeExpr::Named(t) + } +} + +impl From for TypeExpr { + fn from(t: ArrayType) -> Self { + TypeExpr::Array(t) + } +} + +impl From for TypeExpr { + fn from(t: NullableType) -> Self { + TypeExpr::Nullable(t) + } +} + +impl From<&str> for TypeExpr { + fn from(s: &str) -> Self { + TypeExpr::Named(NamedType::new(s)) + } +} + +impl From for TypeExpr { + fn from(s: String) -> Self { + TypeExpr::Named(NamedType::new(s)) + } +} + +impl Display for TypeExpr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TypeExpr::Named(t) => write!(f, "{}", t), + TypeExpr::Array(t) => write!(f, "{}", t), + TypeExpr::Nullable(t) => write!(f, "{}", t), + TypeExpr::Raw(s) => write!(f, "{}", s), + } + } +} + +/// A named type (e.g., `string`, `BigInt`, `MyEntity`). +#[derive(Debug, Clone)] +pub struct NamedType { + pub name: String, +} + +impl NamedType { + pub fn new(name: impl Into) -> Self { + Self { name: name.into() } + } + + /// Capitalize the first letter of the type name. + pub fn capitalize(&mut self) -> &mut Self { + if let Some(c) = self.name.chars().next() { + self.name = c.to_uppercase().collect::() + &self.name[c.len_utf8()..]; + } + self + } + + /// Returns true if this is a primitive AssemblyScript type. + pub fn is_primitive(&self) -> bool { + matches!( + self.name.as_str(), + "boolean" + | "u8" + | "i8" + | "u16" + | "i16" + | "u32" + | "i32" + | "u64" + | "i64" + | "f32" + | "f64" + | "usize" + | "isize" + ) + } + + /// Returns the default value for primitive types, or None if not a primitive. + pub fn get_primitive_default(&self) -> Option<&'static str> { + if !self.is_primitive() { + return None; + } + match self.name.as_str() { + "boolean" => Some("false"), + "u8" | "i8" | "u16" | "i16" | "u32" | "i32" | "u64" | "i64" | "f32" | "f64" + | "usize" | "isize" => Some("0"), + _ => None, + } + } +} + +impl Display for NamedType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.name) + } +} + +/// An array type (e.g., `Array`). +#[derive(Debug, Clone)] +pub struct ArrayType { + pub inner: NamedType, +} + +impl ArrayType { + pub fn new(inner: NamedType) -> Self { + Self { inner } + } +} + +impl Display for ArrayType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Array<{}>", self.inner) + } +} + +/// A nullable type (e.g., `string | null`). +#[derive(Debug, Clone)] +pub struct NullableType { + pub inner: Box, +} + +impl NullableType { + pub fn new(inner: impl Into) -> Self { + Self { + inner: Box::new(inner.into()), + } + } +} + +impl Display for NullableType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} | null", self.inner) + } +} + +/// A class method. 
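+///
+/// Rendering follows the generated-code layout of the TS CLI, e.g. (a sketch):
+///
+/// ```ignore
+/// let m = Method::new("save", vec![], Some("void".into()), "\n    return");
+/// assert!(m.to_string().contains("save(): void {"));
+/// ```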
+#[derive(Debug, Clone)] +pub struct Method { + pub name: String, + pub params: Vec, + pub return_type: Option, + pub body: String, +} + +impl Method { + pub fn new( + name: impl Into, + params: Vec, + return_type: Option, + body: impl Into, + ) -> Self { + Self { + name: name.into(), + params, + return_type, + body: body.into(), + } + } +} + +impl Display for Method { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let params = self + .params + .iter() + .map(|p| p.to_string()) + .collect::>() + .join(", "); + let return_type = self + .return_type + .as_ref() + .map(|t| format!(": {}", t)) + .unwrap_or_default(); + write!( + f, + "\n {}({}){} {{{}\n }}\n", + self.name, params, return_type, self.body + ) + } +} + +/// A static class method. +#[derive(Debug, Clone)] +pub struct StaticMethod { + pub name: String, + pub params: Vec, + pub return_type: TypeExpr, + pub body: String, +} + +impl StaticMethod { + pub fn new( + name: impl Into, + params: Vec, + return_type: impl Into, + body: impl Into, + ) -> Self { + Self { + name: name.into(), + params, + return_type: return_type.into(), + body: body.into(), + } + } +} + +impl Display for StaticMethod { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let params = self + .params + .iter() + .map(|p| p.to_string()) + .collect::>() + .join(", "); + write!( + f, + "\n static {}({}): {} {{{}\n }}\n", + self.name, params, self.return_type, self.body + ) + } +} + +/// A class member (field). +#[derive(Debug, Clone)] +pub struct ClassMember { + pub name: String, + pub member_type: String, +} + +impl ClassMember { + pub fn new(name: impl Into, member_type: impl Into) -> Self { + Self { + name: name.into(), + member_type: member_type.into(), + } + } +} + +impl Display for ClassMember { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, " {}: {}", self.name, self.member_type) + } +} + +/// Code that can be part of a class body (method or static method). +#[allow(dead_code)] +pub enum ClassCode { + Method(Method), + StaticMethod(StaticMethod), +} + +impl Display for ClassCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ClassCode::Method(m) => write!(f, "{}", m), + ClassCode::StaticMethod(m) => write!(f, "{}", m), + } + } +} + +/// A TypeScript/AssemblyScript class. 
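+///
+/// A usage sketch (names are illustrative):
+///
+/// ```ignore
+/// let mut c = klass("Transfer").exported().extends("ethereum.Event");
+/// c.add_method(Method::new(
+///     "get params",
+///     vec![],
+///     Some("Transfer__Params".into()),
+///     "\n    return new Transfer__Params(this)",
+/// ));
+/// assert!(c.to_string().contains("export class Transfer extends ethereum.Event {"));
+/// ```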
+#[derive(Debug, Clone)] +pub struct Class { + pub name: String, + pub extends: Option, + pub export: bool, + pub members: Vec, + pub methods: Vec, + pub static_methods: Vec, +} + +impl Class { + pub fn new(name: impl Into) -> Self { + Self { + name: name.into(), + extends: None, + export: false, + members: Vec::new(), + methods: Vec::new(), + static_methods: Vec::new(), + } + } + + pub fn exported(mut self) -> Self { + self.export = true; + self + } + + pub fn extends(mut self, base: impl Into) -> Self { + self.extends = Some(base.into()); + self + } + + pub fn add_member(&mut self, member: ClassMember) { + self.members.push(member); + } + + pub fn add_method(&mut self, method: Method) { + self.methods.push(method); + } + + pub fn add_static_method(&mut self, method: StaticMethod) { + self.static_methods.push(method); + } +} + +impl Display for Class { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let export = if self.export { "export " } else { "" }; + let extends = self + .extends + .as_ref() + .map(|e| format!(" extends {}", e)) + .unwrap_or_default(); + + writeln!(f, "\n{}class {}{} {{", export, self.name, extends)?; + + // Write members + for member in &self.members { + writeln!(f, "{}", member)?; + } + + // Write methods (regular and static) + for method in &self.static_methods { + write!(f, "{}", method)?; + } + for method in &self.methods { + write!(f, "{}", method)?; + } + + writeln!(f, "}}") + } +} + +/// An import statement. +#[derive(Debug, Clone)] +pub struct ModuleImports { + pub names: Vec, + pub module: String, +} + +impl ModuleImports { + pub fn new(names: Vec, module: impl Into) -> Self { + Self { + names, + module: module.into(), + } + } + + pub fn single(name: impl Into, module: impl Into) -> Self { + Self { + names: vec![name.into()], + module: module.into(), + } + } +} + +impl Display for ModuleImports { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let names = self.names.join(", "); + write!(f, "import {{ {} }} from \"{}\"", names, self.module) + } +} + +/// Helper functions matching the TS CLI API. 
+pub fn klass(name: impl Into) -> Class { + Class::new(name) +} + +pub fn klass_member(name: impl Into, member_type: impl Into) -> ClassMember { + ClassMember::new(name, member_type) +} + +#[cfg(test)] +mod tests { + use super::*; + + pub fn named_type(name: impl Into) -> NamedType { + NamedType::new(name) + } + + pub fn param(name: impl Into, param_type: impl Into) -> Param { + Param::new(name, param_type) + } + + pub fn method( + name: impl Into, + params: Vec, + return_type: Option, + body: impl Into, + ) -> Method { + Method::new(name, params, return_type, body) + } + + #[test] + fn test_method() { + let m = method( + "save", + vec![], + Some(named_type("void").into()), + "\n store.set('Entity', this.id, this)\n ", + ); + let output = m.to_string(); + assert!(output.contains("save()")); + assert!(output.contains(": void")); + assert!(output.contains("store.set")); + } + + #[test] + fn test_class() { + let mut c = klass("MyEntity").exported().extends("Entity"); + c.add_method(method( + "constructor", + vec![param("id", named_type("string"))], + None, + "\n super()\n this.set('id', Value.fromString(id))\n ", + )); + let output = c.to_string(); + assert!(output.contains("export class MyEntity extends Entity")); + assert!(output.contains("constructor(id: string)")); + } + + #[test] + fn test_is_primitive() { + assert!(named_type("boolean").is_primitive()); + assert!(named_type("i32").is_primitive()); + assert!(!named_type("string").is_primitive()); + assert!(!named_type("BigInt").is_primitive()); + } +} diff --git a/gnd/src/lib.rs b/gnd/src/lib.rs index 6f0c9265b6a..1ac14b2116d 100644 --- a/gnd/src/lib.rs +++ b/gnd/src/lib.rs @@ -1,3 +1,4 @@ +pub mod codegen; pub mod commands; pub mod output; pub mod services; From dc088f7ded7869ccdd5655216c95e5438d6076b1 Mon Sep 17 00:00:00 2001 From: David Lutterkort Date: Sun, 18 Jan 2026 21:07:44 -0800 Subject: [PATCH 13/60] gnd: Add ABI code generator for Ethereum contract bindings Implements codegen/abi.rs that generates AssemblyScript bindings from Ethereum contract ABIs: - Event classes with typed parameters and getters - Call classes for function calls with inputs/outputs - Smart contract class with typed call methods - Tuple handling for nested struct types - Support for indexed event parameters - Reserved word escaping for AssemblyScript The generated code matches the format of the TypeScript graph-cli. Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 1 + gnd/Cargo.toml | 1 + gnd/src/codegen/abi.rs | 1427 ++++++++++++++++++++++++++++++++++++++++ gnd/src/codegen/mod.rs | 2 + 4 files changed, 1431 insertions(+) create mode 100644 gnd/src/codegen/abi.rs diff --git a/Cargo.lock b/Cargo.lock index a4f9490cc4b..2c1585b38cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3669,6 +3669,7 @@ dependencies = [ "console", "dirs", "env_logger", + "ethabi", "git-testament", "globset", "graph", diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index bfa07bae24f..1ad6c05138a 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -44,6 +44,7 @@ console = "0.15" # Code generation graphql-parser = "0.4" regex = "1" +ethabi = "17.2" [target.'cfg(unix)'.dependencies] pgtemp = { git = "https://github.com/graphprotocol/pgtemp", branch = "initdb-args" } diff --git a/gnd/src/codegen/abi.rs b/gnd/src/codegen/abi.rs new file mode 100644 index 00000000000..1fe79400bc7 --- /dev/null +++ b/gnd/src/codegen/abi.rs @@ -0,0 +1,1427 @@ +//! ABI code generation for Ethereum contracts. +//! +//! Generates AssemblyScript bindings from contract ABIs: +//! 
- Event classes with typed parameters +//! - Call classes for function calls with inputs/outputs +//! - Contract class with typed call methods + +use std::collections::HashMap; + +use ethabi::{Contract, Event, EventParam, Function, Param, ParamType, StateMutability}; +use regex::Regex; + +use super::typescript::{self as ts, Class, ClassMember, Method, ModuleImports, Param as TsParam}; + +/// Reserved words in AssemblyScript that need to be escaped. +const RESERVED_WORDS: &[&str] = &[ + "await", + "break", + "case", + "catch", + "class", + "const", + "continue", + "debugger", + "delete", + "do", + "else", + "enum", + "export", + "extends", + "false", + "finally", + "function", + "if", + "implements", + "import", + "in", + "interface", + "let", + "new", + "package", + "private", + "protected", + "public", + "return", + "super", + "switch", + "static", + "this", + "throw", + "true", + "try", + "typeof", + "var", + "while", + "with", + "yield", +]; + +/// Handle reserved words by appending an underscore. +fn handle_reserved_word(name: &str) -> String { + if RESERVED_WORDS.contains(&name) { + format!("{}_", name) + } else { + name.to_string() + } +} + +/// Capitalize the first letter of a string. +fn capitalize(s: &str) -> String { + let mut chars = s.chars(); + match chars.next() { + None => String::new(), + Some(c) => c.to_uppercase().collect::() + chars.as_str(), + } +} + +const GRAPH_TS_MODULE: &str = "@graphprotocol/graph-ts"; + +/// ABI code generator. +pub struct AbiCodeGenerator { + contract: Contract, + name: String, +} + +impl AbiCodeGenerator { + /// Create a new ABI code generator. + pub fn new(contract: Contract, name: impl Into) -> Self { + let mut name = name.into(); + // Sanitize name to be a valid class name + let re = Regex::new(r#"[!@#$%^&*()+\-=\[\]{};':\"|,.<>/?]+"#).unwrap(); + name = re.replace_all(&name, "_").to_string(); + Self { contract, name } + } + + /// Generate module imports for the ABI file. + pub fn generate_module_imports(&self) -> Vec { + vec![ModuleImports::new( + vec![ + "ethereum".to_string(), + "JSONValue".to_string(), + "TypedMap".to_string(), + "Entity".to_string(), + "Bytes".to_string(), + "Address".to_string(), + "BigInt".to_string(), + ], + GRAPH_TS_MODULE, + )] + } + + /// Generate all types from the ABI. + pub fn generate_types(&self) -> Vec { + let mut classes = Vec::new(); + classes.extend(self.generate_event_types()); + classes.extend(self.generate_smart_contract_class()); + classes.extend(self.generate_call_types()); + classes + } + + /// Generate event type classes. 
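+    ///
+    /// A sketch, assuming `contract` holds a parsed ABI with a single
+    /// `Transfer(address,address,uint256)` event:
+    ///
+    /// ```ignore
+    /// let classes = AbiCodeGenerator::new(contract, "Token").generate_types();
+    /// // `Transfer` extends `ethereum.Event`; `Transfer__Params` exposes one getter per input.
+    /// assert!(classes.iter().any(|c| c.name == "Transfer"));
+    /// assert!(classes.iter().any(|c| c.name == "Transfer__Params"));
+    /// ```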
+ fn generate_event_types(&self) -> Vec { + let mut classes = Vec::new(); + let events = self.disambiguate_events(); + + for (event, alias) in events { + let event_class_name = alias.clone(); + let mut tuple_classes = Vec::new(); + + // Generate params class + let params_class_name = [&event_class_name, "__Params"].concat(); + let mut params_class = ts::klass(¶ms_class_name).exported(); + params_class.add_member(ClassMember::new("_event", &event_class_name)); + params_class.add_method(Method::new( + "constructor", + vec![TsParam::new("event", ts::NamedType::new(&event_class_name))], + None, + "this._event = event", + )); + + // Generate getters for event params + let inputs = self.disambiguate_params(&event.inputs, "param"); + for (index, (param, param_name)) in inputs.iter().enumerate() { + let param_object = self.generate_event_param( + param, + param_name, + index, + &event_class_name, + &mut tuple_classes, + ); + params_class.add_method(param_object); + } + + // Generate event class + let mut event_class = ts::klass(&event_class_name) + .exported() + .extends("ethereum.Event"); + event_class.add_method(Method::new( + "get params", + vec![], + Some(ts::NamedType::new(¶ms_class_name).into()), + format!("return new {}(this)", params_class_name), + )); + + classes.push(event_class); + classes.push(params_class); + classes.extend(tuple_classes); + } + + classes + } + + /// Generate the smart contract class with call methods. + fn generate_smart_contract_class(&self) -> Vec { + let mut classes = Vec::new(); + + let mut contract_class = ts::klass(&self.name) + .exported() + .extends("ethereum.SmartContract"); + + // Add static bind method + let contract_name = &self.name; + contract_class.add_static_method(ts::StaticMethod::new( + "bind", + vec![TsParam::new("address", ts::NamedType::new("Address"))], + ts::NamedType::new(&self.name), + format!("return new {}('{}', address)", contract_name, contract_name), + )); + + // Get callable functions + let functions = self.get_callable_functions(); + let disambiguated = self.disambiguate_functions(&functions); + + for (func, alias) in disambiguated { + let (method, try_method, result_classes) = self.generate_function_methods(func, &alias); + contract_class.add_method(method); + contract_class.add_method(try_method); + classes.extend(result_classes); + } + + classes.push(contract_class); + classes + } + + /// Generate call type classes. 
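+    ///
+    /// A sketch, assuming `gen` wraps an ABI with an `approve(address,uint256)` function:
+    ///
+    /// ```ignore
+    /// let classes = gen.generate_call_types();
+    /// // The call class extends `ethereum.Call`; the companion classes decode
+    /// // `inputValues` and `outputValues` by position.
+    /// assert!(classes.iter().any(|c| c.name == "ApproveCall"));
+    /// assert!(classes.iter().any(|c| c.name == "ApproveCall__Inputs"));
+    /// assert!(classes.iter().any(|c| c.name == "ApproveCall__Outputs"));
+    /// ```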
+ fn generate_call_types(&self) -> Vec { + let mut classes = Vec::new(); + let functions = self.get_call_functions(); + let disambiguated = self.disambiguate_call_functions(&functions); + + for (func, alias) in disambiguated { + let cap_alias = capitalize(&alias); + let call_class_name = format!("{}Call", cap_alias); + let mut tuple_classes = Vec::new(); + + // Generate inputs class + let inputs_class_name = [&call_class_name, "__Inputs"].concat(); + let mut inputs_class = ts::klass(&inputs_class_name).exported(); + inputs_class.add_member(ClassMember::new("_call", &call_class_name)); + inputs_class.add_method(Method::new( + "constructor", + vec![TsParam::new("call", ts::NamedType::new(&call_class_name))], + None, + "this._call = call", + )); + + let inputs = self.disambiguate_params_from_func_inputs(&func.inputs, "value"); + for (index, (param, param_name)) in inputs.iter().enumerate() { + let getter = self.generate_input_output_getter( + param, + param_name, + index, + &call_class_name, + "call", + "inputValues", + &mut tuple_classes, + ); + inputs_class.add_method(getter); + } + + // Generate outputs class + let outputs_class_name = [&call_class_name, "__Outputs"].concat(); + let mut outputs_class = ts::klass(&outputs_class_name).exported(); + outputs_class.add_member(ClassMember::new("_call", &call_class_name)); + outputs_class.add_method(Method::new( + "constructor", + vec![TsParam::new("call", ts::NamedType::new(&call_class_name))], + None, + "this._call = call", + )); + + let outputs = self.disambiguate_params_from_func_inputs(&func.outputs, "value"); + for (index, (param, param_name)) in outputs.iter().enumerate() { + let getter = self.generate_input_output_getter( + param, + param_name, + index, + &call_class_name, + "call", + "outputValues", + &mut tuple_classes, + ); + outputs_class.add_method(getter); + } + + // Generate call class + let mut call_class = ts::klass(&call_class_name) + .exported() + .extends("ethereum.Call"); + call_class.add_method(Method::new( + "get inputs", + vec![], + Some(ts::NamedType::new(&inputs_class_name).into()), + format!("return new {}(this)", inputs_class_name), + )); + call_class.add_method(Method::new( + "get outputs", + vec![], + Some(ts::NamedType::new(&outputs_class_name).into()), + format!("return new {}(this)", outputs_class_name), + )); + + classes.push(call_class); + classes.push(inputs_class); + classes.push(outputs_class); + classes.extend(tuple_classes); + } + + classes + } + + /// Generate a getter method for an event parameter. + fn generate_event_param( + &self, + param: &EventParam, + name: &str, + index: usize, + event_class_name: &str, + tuple_classes: &mut Vec, + ) -> Method { + // Handle indexed params - strings, bytes and arrays are hashed to bytes32 + let value_type = if param.indexed { + self.indexed_input_type(¶m.kind) + } else { + param.kind.clone() + }; + + if self.contains_tuple_type(&value_type) { + self.generate_tuple_getter( + ¶m.kind, + name, + index, + event_class_name, + "event", + "parameters", + tuple_classes, + ) + } else { + let asc_type = self.asc_type_for_ethereum(&value_type); + let access = format!("this._event.parameters[{}].value", index); + let conversion = self.ethereum_to_asc(&access, &value_type, None); + Method::new( + format!("get {}", name), + vec![], + Some(ts::TypeExpr::Raw(asc_type)), + format!("return {}", conversion), + ) + } + } + + /// Generate a getter for call inputs/outputs. 
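+    ///
+    /// For a non-tuple `address` input at position 0 named `spender` (illustrative),
+    /// the generated getter is roughly:
+    ///
+    /// ```text
+    /// get spender(): Address {
+    ///   return this._call.inputValues[0].value.toAddress()
+    /// }
+    /// ```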
+ #[allow(clippy::too_many_arguments)] + fn generate_input_output_getter( + &self, + param: &Param, + name: &str, + index: usize, + parent_class: &str, + parent_type: &str, + parent_field: &str, + tuple_classes: &mut Vec, + ) -> Method { + if self.contains_tuple_type(¶m.kind) { + self.generate_tuple_getter( + ¶m.kind, + name, + index, + parent_class, + parent_type, + parent_field, + tuple_classes, + ) + } else { + let asc_type = self.asc_type_for_ethereum(¶m.kind); + let access = format!("this._{}.{}[{}].value", parent_type, parent_field, index); + let conversion = self.ethereum_to_asc(&access, ¶m.kind, None); + Method::new( + format!("get {}", name), + vec![], + Some(ts::TypeExpr::Raw(asc_type)), + format!("return {}", conversion), + ) + } + } + + /// Generate a tuple getter and its associated classes. + #[allow(clippy::too_many_arguments)] + fn generate_tuple_getter( + &self, + param_type: &ParamType, + name: &str, + index: usize, + parent_class: &str, + parent_type: &str, + parent_field: &str, + tuple_classes: &mut Vec, + ) -> Method { + let cap_name = capitalize(name); + let tuple_identifier = format!("{}{}", parent_class, cap_name); + let tuple_class_name = if parent_field == "outputValues" { + format!("{}OutputStruct", tuple_identifier) + } else { + format!("{}Struct", tuple_identifier) + }; + + let is_tuple = matches!(param_type, ParamType::Tuple(_)); + let access_code = if parent_type == "tuple" { + format!("this[{}]", index) + } else { + format!("this._{}.{}[{}].value", parent_type, parent_field, index) + }; + + let return_value = self.ethereum_to_asc(&access_code, param_type, Some(&tuple_class_name)); + + let return_type = if self.is_tuple_matrix_type(param_type) { + format!("Array>", tuple_class_name) + } else if self.is_tuple_array_type(param_type) { + format!("Array<{}>", tuple_class_name) + } else { + tuple_class_name.clone() + }; + + let body = if is_tuple { + format!("return changetype<{}>({})", tuple_class_name, return_value) + } else { + format!("return {}", return_value) + }; + + // Generate tuple class + if let Some(components) = self.get_tuple_components(param_type) { + let mut tuple_class = ts::klass(&tuple_class_name) + .exported() + .extends("ethereum.Tuple"); + + let component_params = self.disambiguate_tuple_components(components); + for (idx, (component, component_name)) in component_params.iter().enumerate() { + let component_getter = self.generate_tuple_component_getter( + component, + component_name, + idx, + &tuple_identifier, + tuple_classes, + ); + tuple_class.add_method(component_getter); + } + + tuple_classes.push(tuple_class); + } + + Method::new( + format!("get {}", name), + vec![], + Some(ts::TypeExpr::Raw(return_type)), + body, + ) + } + + /// Generate a getter for a tuple component. + fn generate_tuple_component_getter( + &self, + param_type: &ParamType, + name: &str, + index: usize, + parent_class: &str, + tuple_classes: &mut Vec, + ) -> Method { + if self.contains_tuple_type(param_type) { + self.generate_tuple_getter( + param_type, + name, + index, + parent_class, + "tuple", + "", + tuple_classes, + ) + } else { + let asc_type = self.asc_type_for_ethereum(param_type); + let access = format!("this[{}]", index); + let conversion = self.ethereum_to_asc(&access, param_type, None); + Method::new( + format!("get {}", name), + vec![], + Some(ts::TypeExpr::Raw(asc_type)), + format!("return {}", conversion), + ) + } + } + + /// Generate methods for a callable function. 
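+    ///
+    /// Each function yields a plain method plus a `try_` variant, e.g. a sketch for
+    /// `balanceOf(address owner) view returns (uint256)` (assuming `classes` comes
+    /// from `generate_types()`):
+    ///
+    /// ```ignore
+    /// // balanceOf(owner: Address): BigInt
+    /// // try_balanceOf(owner: Address): ethereum.CallResult<BigInt>
+    /// let token = classes.iter().find(|c| c.name == "Token").unwrap();
+    /// assert!(token.methods.iter().any(|m| m.name == "balanceOf"));
+    /// assert!(token.methods.iter().any(|m| m.name == "try_balanceOf"));
+    /// ```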
+ fn generate_function_methods( + &self, + func: &Function, + alias: &str, + ) -> (Method, Method, Vec) { + let mut result_classes = Vec::new(); + let fn_signature = self.function_signature(func); + let contract_name = &self.name; + let tuple_result_parent_type = [contract_name, "__", alias, "Result"].concat(); + let tuple_input_parent_type = [contract_name, "__", alias, "Input"].concat(); + + // Disambiguate outputs + let outputs = self.disambiguate_params_from_func_inputs(&func.outputs, "value"); + + // Determine return type + let (return_type, simple_return_type) = if outputs.len() > 1 { + // Multiple outputs - create a result struct + let result_class = self.generate_result_class( + &outputs, + &tuple_result_parent_type, + &mut result_classes, + ); + result_classes.push(result_class.clone()); + (result_class.name.clone(), false) + } else if !outputs.is_empty() { + let (param, _) = &outputs[0]; + if self.contains_tuple_type(¶m.kind) { + let tuple_name = self.generate_tuple_return_type( + ¶m.kind, + 0, + &tuple_result_parent_type, + &mut result_classes, + ); + (tuple_name, true) + } else { + (self.asc_type_for_ethereum(¶m.kind), true) + } + } else { + ("void".to_string(), true) + }; + + // Disambiguate inputs + let inputs = self.disambiguate_params_from_func_inputs(&func.inputs, "param"); + + // Generate tuple types for inputs + for (index, (param, _)) in inputs.iter().enumerate() { + if self.contains_tuple_type(¶m.kind) { + self.generate_tuple_class_for_input( + ¶m.kind, + index, + &tuple_input_parent_type, + &mut result_classes, + ); + } + } + + // Build params + let params: Vec = inputs + .iter() + .enumerate() + .map(|(index, (param, name))| { + let param_type = + self.get_param_type_for_input(¶m.kind, index, &tuple_input_parent_type); + TsParam::new(name.clone(), ts::TypeExpr::Raw(param_type)) + }) + .collect(); + + // Build call arguments + let call_args: Vec = inputs + .iter() + .map(|(param, name)| self.ethereum_from_asc(name, ¶m.kind)) + .collect(); + + let func_name = &func.name; + let call_args_str = call_args.join(", "); + let super_inputs = format!("'{}', '{}', [{}]", func_name, fn_signature, call_args_str); + + // Generate method body + let method_body = self.generate_call_body( + &outputs, + &return_type, + simple_return_type, + &super_inputs, + &tuple_result_parent_type, + false, + ); + + let try_method_body = self.generate_call_body( + &outputs, + &return_type, + simple_return_type, + &super_inputs, + &tuple_result_parent_type, + true, + ); + + let method = Method::new( + alias.to_string(), + params.clone(), + Some(ts::TypeExpr::Raw(return_type.clone())), + method_body, + ); + + let try_method = Method::new( + format!("try_{}", alias), + params, + Some(ts::TypeExpr::Raw(format!( + "ethereum.CallResult<{}>", + return_type + ))), + try_method_body, + ); + + (method, try_method, result_classes) + } + + /// Generate call method body. 
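+    ///
+    /// For the `balanceOf(address) view returns (uint256)` example, the `try_` body
+    /// is roughly:
+    ///
+    /// ```text
+    /// let result = super.tryCall('balanceOf', 'balanceOf(address)', [ethereum.Value.fromAddress(owner)])
+    /// if (result.reverted) {
+    ///   return new ethereum.CallResult()
+    /// }
+    /// let value = result.value
+    /// return ethereum.CallResult.fromValue(value[0].toBigInt())
+    /// ```
+    ///
+    /// The non-try body calls `super.call(...)` and returns the converted value directly.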
+ fn generate_call_body( + &self, + outputs: &[(&Param, String)], + return_type: &str, + simple_return_type: bool, + super_inputs: &str, + tuple_result_parent_type: &str, + is_try: bool, + ) -> String { + let nl = "\n"; + let (call_stmt, result_var) = if is_try { + let mut lines = Vec::new(); + lines.push(format!("let result = super.tryCall({})", super_inputs)); + lines.push(" if (result.reverted) {".to_string()); + lines.push(" return new ethereum.CallResult()".to_string()); + lines.push(" }".to_string()); + lines.push(" let value = result.value".to_string()); + (lines.join(nl), "value") + } else { + ( + format!("let result = super.call({})", super_inputs), + "result", + ) + }; + + let return_val = if simple_return_type { + if outputs.is_empty() { + String::new() + } else { + let (param, _) = &outputs[0]; + let tuple_name = if self.is_tuple_array_type(¶m.kind) { + Some(self.tuple_type_name(¶m.kind, 0, tuple_result_parent_type)) + } else { + None + }; + let val = self.ethereum_to_asc( + &format!("{}[0]", result_var), + ¶m.kind, + tuple_name.as_deref(), + ); + if matches!(param.kind, ParamType::Tuple(_)) { + format!("changetype<{}>({})", return_type, val) + } else { + val + } + } + } else { + let conversions: Vec = outputs + .iter() + .enumerate() + .map(|(index, (param, _))| { + let tuple_name = if self.is_tuple_array_type(¶m.kind) { + Some(self.tuple_type_name(¶m.kind, index, tuple_result_parent_type)) + } else { + None + }; + let val = self.ethereum_to_asc( + &format!("{}[{}]", result_var, index), + ¶m.kind, + tuple_name.as_deref(), + ); + if matches!(param.kind, ParamType::Tuple(_)) { + let tn = self.tuple_type_name(¶m.kind, index, tuple_result_parent_type); + format!("changetype<{}>({})", tn, val) + } else { + val + } + }) + .collect(); + let conv_str = conversions.join(", "); + format!("new {}({})", return_type, conv_str) + }; + + if is_try { + [ + &call_stmt, + nl, + " return ethereum.CallResult.fromValue(", + &return_val, + ")", + ] + .concat() + } else if outputs.is_empty() { + call_stmt + } else { + [&call_stmt, nl, nl, " return (", &return_val, ")"].concat() + } + } + + /// Generate a result class for multiple outputs. 
+ fn generate_result_class( + &self, + outputs: &[(&Param, String)], + tuple_result_parent_type: &str, + result_classes: &mut Vec, + ) -> Class { + let class_name = tuple_result_parent_type.to_string(); + let mut klass = ts::klass(&class_name).exported(); + + // Add constructor + let constructor_params: Vec = outputs + .iter() + .enumerate() + .map(|(index, (param, _))| { + let param_type = + self.get_param_type_for_input(¶m.kind, index, tuple_result_parent_type); + TsParam::new(format!("value{}", index), ts::TypeExpr::Raw(param_type)) + }) + .collect(); + + let nl = "\n"; + let constructor_body: Vec = outputs + .iter() + .enumerate() + .map(|(index, _)| format!("this.value{} = value{}", index, index)) + .collect(); + + klass.add_method(Method::new( + "constructor", + constructor_params, + None, + constructor_body.join(&format!("{} ", nl)), + )); + + // Add toMap method + let map_entries: Vec = outputs + .iter() + .enumerate() + .map(|(index, (param, _))| { + let this_val = format!("this.value{}", index); + let from_asc = self.ethereum_from_asc(&this_val, ¶m.kind); + format!("map.set('value{}', {})", index, from_asc) + }) + .collect(); + + let map_body = [ + "let map = new TypedMap()", + nl, + " ", + &map_entries.join(&format!("{} ", nl)), + nl, + " return map", + ] + .concat(); + + klass.add_method(Method::new( + "toMap", + vec![], + Some(ts::TypeExpr::Raw( + "TypedMap".to_string(), + )), + map_body, + )); + + // Add members + for (index, (param, _)) in outputs.iter().enumerate() { + let param_type = + self.get_param_type_for_input(¶m.kind, index, tuple_result_parent_type); + klass.add_member(ClassMember::new(format!("value{}", index), param_type)); + } + + // Add getters for named outputs + for (index, (param, _)) in outputs.iter().enumerate() { + if !param.name.is_empty() { + let cap = capitalize(¶m.name); + let getter_name = format!("get{}", cap); + let param_type = + self.get_param_type_for_input(¶m.kind, index, tuple_result_parent_type); + klass.add_method(Method::new( + getter_name, + vec![], + Some(ts::TypeExpr::Raw(param_type)), + format!("return this.value{}", index), + )); + } + } + + // Generate tuple classes for outputs + for (index, (param, _)) in outputs.iter().enumerate() { + if self.contains_tuple_type(¶m.kind) { + self.generate_tuple_class_for_input( + ¶m.kind, + index, + tuple_result_parent_type, + result_classes, + ); + } + } + + klass + } + + /// Generate tuple return type name and classes. + fn generate_tuple_return_type( + &self, + param_type: &ParamType, + index: usize, + parent_type: &str, + result_classes: &mut Vec, + ) -> String { + self.generate_tuple_class_for_input(param_type, index, parent_type, result_classes); + let tuple_name = self.tuple_type_name(param_type, index, parent_type); + if self.is_tuple_array_type(param_type) { + format!("Array<{}>", tuple_name) + } else if self.is_tuple_matrix_type(param_type) { + format!("Array>", tuple_name) + } else { + tuple_name + } + } + + /// Generate tuple class for an input/output. 
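+    ///
+    /// For an `(address,uint256)` tuple at position 0 with parent type `Foo`
+    /// (illustrative), the emitted struct is roughly:
+    ///
+    /// ```text
+    /// export class FooValue0Struct extends ethereum.Tuple {
+    ///   get value0(): Address { return this[0].toAddress() }
+    ///   get value1(): BigInt { return this[1].toBigInt() }
+    /// }
+    /// ```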
+ fn generate_tuple_class_for_input( + &self, + param_type: &ParamType, + index: usize, + parent_type: &str, + result_classes: &mut Vec, + ) { + let tuple_class_name = self.tuple_type_name(param_type, index, parent_type); + let mut tuple_class = ts::klass(&tuple_class_name) + .exported() + .extends("ethereum.Tuple"); + + if let Some(components) = self.get_tuple_components(param_type) { + let component_params = self.disambiguate_tuple_components(components); + for (idx, (component, component_name)) in component_params.iter().enumerate() { + let getter = if self.contains_tuple_type(component) { + // Recursively generate tuple classes + let cap = capitalize(&format!("{}", index)); + let nested_parent = format!("{}Value{}", parent_type, cap); + self.generate_tuple_class_for_input( + component, + idx, + &nested_parent, + result_classes, + ); + let nested_tuple_name = self.tuple_type_name(component, idx, &nested_parent); + let access = format!("this[{}]", idx); + let conversion = + self.ethereum_to_asc(&access, component, Some(&nested_tuple_name)); + let return_type = if self.is_tuple_array_type(component) { + format!("Array<{}>", nested_tuple_name) + } else { + nested_tuple_name.clone() + }; + let body = if matches!(component, ParamType::Tuple(_)) { + format!("return changetype<{}>({})", nested_tuple_name, conversion) + } else { + format!("return {}", conversion) + }; + Method::new( + format!("get {}", component_name), + vec![], + Some(ts::TypeExpr::Raw(return_type)), + body, + ) + } else { + let asc_type = self.asc_type_for_ethereum(component); + let access = format!("this[{}]", idx); + let conversion = self.ethereum_to_asc(&access, component, None); + Method::new( + format!("get {}", component_name), + vec![], + Some(ts::TypeExpr::Raw(asc_type)), + format!("return {}", conversion), + ) + }; + tuple_class.add_method(getter); + } + } + + result_classes.push(tuple_class); + } + + /// Get tuple type name for a param. + fn tuple_type_name(&self, _param_type: &ParamType, index: usize, parent_type: &str) -> String { + format!("{}Value{}Struct", parent_type, index) + } + + /// Get the param type string for an input, handling tuples. + fn get_param_type_for_input( + &self, + param_type: &ParamType, + index: usize, + parent_type: &str, + ) -> String { + if matches!(param_type, ParamType::Tuple(_)) { + self.tuple_type_name(param_type, index, parent_type) + } else if self.is_tuple_matrix_type(param_type) { + let tn = self.tuple_type_name(param_type, index, parent_type); + format!("Array>", tn) + } else if self.is_tuple_array_type(param_type) { + let tn = self.tuple_type_name(param_type, index, parent_type); + format!("Array<{}>", tn) + } else { + self.asc_type_for_ethereum(param_type) + } + } + + /// Get callable functions (view, pure, nonpayable, constant with outputs). + fn get_callable_functions(&self) -> Vec<&Function> { + self.contract + .functions() + .filter(|f| { + !f.outputs.is_empty() + && matches!( + f.state_mutability, + StateMutability::View | StateMutability::Pure | StateMutability::NonPayable + ) + }) + .collect() + } + + /// Get functions that can be used as calls. + fn get_call_functions(&self) -> Vec<&Function> { + self.contract.functions().collect() + } + + /// Disambiguate events with duplicate names. 
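+    ///
+    /// The first occurrence keeps its name and later overloads get a numeric suffix,
+    /// so two `Approval` events come out as `Approval` and `Approval1`. Reserved
+    /// AssemblyScript identifiers are escaped first, e.g.:
+    ///
+    /// ```ignore
+    /// assert_eq!(handle_reserved_word("class"), "class_");
+    /// assert_eq!(handle_reserved_word("transfer"), "transfer");
+    /// ```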
+ fn disambiguate_events(&self) -> Vec<(&Event, String)> { + let mut result = Vec::new(); + let mut collision_counter: HashMap = HashMap::new(); + + for event in self.contract.events() { + let name = handle_reserved_word(&event.name); + let counter = collision_counter.entry(name.clone()).or_insert(0); + let alias = if *counter == 0 { + name.clone() + } else { + format!("{}{}", name, counter) + }; + *counter += 1; + result.push((event, alias)); + } + + result + } + + /// Disambiguate functions. + fn disambiguate_functions<'a>( + &self, + functions: &[&'a Function], + ) -> Vec<(&'a Function, String)> { + let mut result = Vec::new(); + let mut collision_counter: HashMap = HashMap::new(); + + for func in functions { + let name = handle_reserved_word(&func.name); + let counter = collision_counter.entry(name.clone()).or_insert(0); + let alias = if *counter == 0 { + name.clone() + } else { + format!("{}{}", name, counter) + }; + *counter += 1; + result.push((*func, alias)); + } + + result + } + + /// Disambiguate call functions. + fn disambiguate_call_functions<'a>( + &self, + functions: &[&'a Function], + ) -> Vec<(&'a Function, String)> { + let mut result = Vec::new(); + let mut collision_counter: HashMap = HashMap::new(); + + for func in functions { + let name = if func.name.is_empty() { + "default".to_string() + } else { + handle_reserved_word(&func.name) + }; + let counter = collision_counter.entry(name.clone()).or_insert(0); + let alias = if *counter == 0 { + name.clone() + } else { + format!("{}{}", name, counter) + }; + *counter += 1; + result.push((*func, alias)); + } + + result + } + + /// Disambiguate event params. + fn disambiguate_params<'a>( + &self, + params: &'a [EventParam], + default_prefix: &str, + ) -> Vec<(&'a EventParam, String)> { + let mut result = Vec::new(); + let mut collision_counter: HashMap = HashMap::new(); + + for (index, param) in params.iter().enumerate() { + let name = if param.name.is_empty() { + format!("{}{}", default_prefix, index) + } else { + handle_reserved_word(¶m.name) + }; + let counter = collision_counter.entry(name.clone()).or_insert(0); + let disambiguated = if *counter == 0 { + name.clone() + } else { + format!("{}{}", name, counter) + }; + *counter += 1; + result.push((param, disambiguated)); + } + + result + } + + /// Disambiguate function params. + fn disambiguate_params_from_func_inputs<'a>( + &self, + params: &'a [Param], + default_prefix: &str, + ) -> Vec<(&'a Param, String)> { + let mut result = Vec::new(); + let mut collision_counter: HashMap = HashMap::new(); + + for (index, param) in params.iter().enumerate() { + let name = if param.name.is_empty() { + format!("{}{}", default_prefix, index) + } else { + handle_reserved_word(¶m.name) + }; + let counter = collision_counter.entry(name.clone()).or_insert(0); + let disambiguated = if *counter == 0 { + name.clone() + } else { + format!("{}{}", name, counter) + }; + *counter += 1; + result.push((param, disambiguated)); + } + + result + } + + /// Disambiguate tuple components. + fn disambiguate_tuple_components<'a>( + &self, + components: &'a [ParamType], + ) -> Vec<(&'a ParamType, String)> { + components + .iter() + .enumerate() + .map(|(index, component)| (component, format!("value{}", index))) + .collect() + } + + /// Get function signature. 
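+    ///
+    /// Only the input types are part of the signature, e.g. (assuming `func` is the
+    /// parsed `balanceOf(address owner)` entry):
+    ///
+    /// ```ignore
+    /// assert_eq!(gen.function_signature(&func), "balanceOf(address)");
+    /// ```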
+ fn function_signature(&self, func: &Function) -> String { + let param_types: Vec = func.inputs.iter().map(|p| p.kind.to_string()).collect(); + let name = &func.name; + let types = param_types.join(","); + format!("{}({})", name, types) + } + + /// Get AssemblyScript type for an Ethereum type. + fn asc_type_for_ethereum(&self, param_type: &ParamType) -> String { + match param_type { + ParamType::Address => "Address".to_string(), + ParamType::Bool => "boolean".to_string(), + ParamType::Bytes => "Bytes".to_string(), + ParamType::FixedBytes(_) => "Bytes".to_string(), + ParamType::Int(bits) => { + if *bits <= 32 { + "i32".to_string() + } else { + "BigInt".to_string() + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + "i32".to_string() + } else { + "BigInt".to_string() + } + } + ParamType::String => "string".to_string(), + ParamType::Array(inner) => { + let inner_type = self.asc_type_for_ethereum(inner); + format!("Array<{}>", inner_type) + } + ParamType::FixedArray(inner, _) => { + let inner_type = self.asc_type_for_ethereum(inner); + format!("Array<{}>", inner_type) + } + ParamType::Tuple(_) => "ethereum.Tuple".to_string(), + } + } + + /// Convert ethereum value to AssemblyScript. + fn ethereum_to_asc( + &self, + code: &str, + param_type: &ParamType, + tuple_type: Option<&str>, + ) -> String { + match param_type { + ParamType::Address => format!("{}.toAddress()", code), + ParamType::Bool => format!("{}.toBoolean()", code), + ParamType::Bytes | ParamType::FixedBytes(_) => format!("{}.toBytes()", code), + ParamType::Int(bits) => { + if *bits <= 32 { + format!("{}.toI32()", code) + } else { + format!("{}.toBigInt()", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!("{}.toI32()", code) + } else { + format!("{}.toBigInt()", code) + } + } + ParamType::String => format!("{}.toString()", code), + ParamType::Array(inner) | ParamType::FixedArray(inner, _) => match inner.as_ref() { + ParamType::Address => format!("{}.toAddressArray()", code), + ParamType::Bool => format!("{}.toBooleanArray()", code), + ParamType::Bytes | ParamType::FixedBytes(_) => { + format!("{}.toBytesArray()", code) + } + ParamType::Int(bits) => { + if *bits <= 32 { + format!("{}.toI32Array()", code) + } else { + format!("{}.toBigIntArray()", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!("{}.toI32Array()", code) + } else { + format!("{}.toBigIntArray()", code) + } + } + ParamType::String => format!("{}.toStringArray()", code), + ParamType::Tuple(_) => { + if let Some(tuple_name) = tuple_type { + format!("{}.toTupleArray<{}>()", code, tuple_name) + } else { + format!("{}.toTupleArray()", code) + } + } + ParamType::Array(inner2) | ParamType::FixedArray(inner2, _) => { + self.ethereum_to_asc_matrix(code, inner2.as_ref(), tuple_type) + } + }, + ParamType::Tuple(_) => format!("{}.toTuple()", code), + } + } + + /// Convert matrix type to AssemblyScript. 
+ fn ethereum_to_asc_matrix( + &self, + code: &str, + inner_type: &ParamType, + tuple_type: Option<&str>, + ) -> String { + match inner_type { + ParamType::Address => format!("{}.toAddressMatrix()", code), + ParamType::Bool => format!("{}.toBooleanMatrix()", code), + ParamType::Bytes | ParamType::FixedBytes(_) => format!("{}.toBytesMatrix()", code), + ParamType::Int(bits) => { + if *bits <= 32 { + format!("{}.toI32Matrix()", code) + } else { + format!("{}.toBigIntMatrix()", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!("{}.toI32Matrix()", code) + } else { + format!("{}.toBigIntMatrix()", code) + } + } + ParamType::String => format!("{}.toStringMatrix()", code), + ParamType::Tuple(_) => { + if let Some(tuple_name) = tuple_type { + format!("{}.toTupleMatrix<{}>()", code, tuple_name) + } else { + format!("{}.toTupleMatrix()", code) + } + } + _ => format!("{}.toStringMatrix()", code), // fallback + } + } + + /// Convert AssemblyScript value to ethereum value. + fn ethereum_from_asc(&self, code: &str, param_type: &ParamType) -> String { + match param_type { + ParamType::Address => format!("ethereum.Value.fromAddress({})", code), + ParamType::Bool => format!("ethereum.Value.fromBoolean({})", code), + ParamType::Bytes => format!("ethereum.Value.fromBytes({})", code), + ParamType::FixedBytes(_) => format!("ethereum.Value.fromFixedBytes({})", code), + ParamType::Int(bits) => { + if *bits <= 32 { + format!("ethereum.Value.fromI32({})", code) + } else { + format!("ethereum.Value.fromSignedBigInt({})", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!( + "ethereum.Value.fromUnsignedBigInt(BigInt.fromI32({}))", + code + ) + } else { + format!("ethereum.Value.fromUnsignedBigInt({})", code) + } + } + ParamType::String => format!("ethereum.Value.fromString({})", code), + ParamType::Array(inner) | ParamType::FixedArray(inner, _) => { + self.ethereum_from_asc_array(code, inner.as_ref()) + } + ParamType::Tuple(_) => format!("ethereum.Value.fromTuple({})", code), + } + } + + /// Convert array to ethereum value. + fn ethereum_from_asc_array(&self, code: &str, inner_type: &ParamType) -> String { + match inner_type { + ParamType::Address => format!("ethereum.Value.fromAddressArray({})", code), + ParamType::Bool => format!("ethereum.Value.fromBooleanArray({})", code), + ParamType::Bytes => format!("ethereum.Value.fromBytesArray({})", code), + ParamType::FixedBytes(_) => format!("ethereum.Value.fromFixedBytesArray({})", code), + ParamType::Int(bits) => { + if *bits <= 32 { + format!("ethereum.Value.fromI32Array({})", code) + } else { + format!("ethereum.Value.fromSignedBigIntArray({})", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!("ethereum.Value.fromI32Array({})", code) + } else { + format!("ethereum.Value.fromUnsignedBigIntArray({})", code) + } + } + ParamType::String => format!("ethereum.Value.fromStringArray({})", code), + ParamType::Tuple(_) => format!("ethereum.Value.fromTupleArray({})", code), + ParamType::Array(inner2) | ParamType::FixedArray(inner2, _) => { + self.ethereum_from_asc_matrix(code, inner2.as_ref()) + } + } + } + + /// Convert matrix to ethereum value. 
+ fn ethereum_from_asc_matrix(&self, code: &str, inner_type: &ParamType) -> String { + match inner_type { + ParamType::Address => format!("ethereum.Value.fromAddressMatrix({})", code), + ParamType::Bool => format!("ethereum.Value.fromBooleanMatrix({})", code), + ParamType::Bytes => format!("ethereum.Value.fromBytesMatrix({})", code), + ParamType::FixedBytes(_) => format!("ethereum.Value.fromFixedBytesMatrix({})", code), + ParamType::Int(bits) => { + if *bits <= 32 { + format!("ethereum.Value.fromI32Matrix({})", code) + } else { + format!("ethereum.Value.fromSignedBigIntMatrix({})", code) + } + } + ParamType::Uint(bits) => { + if *bits <= 24 { + format!("ethereum.Value.fromI32Matrix({})", code) + } else { + format!("ethereum.Value.fromUnsignedBigIntMatrix({})", code) + } + } + ParamType::String => format!("ethereum.Value.fromStringMatrix({})", code), + ParamType::Tuple(_) => format!("ethereum.Value.fromTupleMatrix({})", code), + _ => format!("ethereum.Value.fromStringMatrix({})", code), // fallback + } + } + + /// Check if param type contains a tuple. + fn contains_tuple_type(&self, param_type: &ParamType) -> bool { + match param_type { + ParamType::Tuple(_) => true, + ParamType::Array(inner) | ParamType::FixedArray(inner, _) => { + self.contains_tuple_type(inner) + } + _ => false, + } + } + + /// Check if param type is a tuple array. + fn is_tuple_array_type(&self, param_type: &ParamType) -> bool { + matches!( + param_type, + ParamType::Array(inner) | ParamType::FixedArray(inner, _) + if matches!(inner.as_ref(), ParamType::Tuple(_)) + ) + } + + /// Check if param type is a tuple matrix (2D array). + fn is_tuple_matrix_type(&self, param_type: &ParamType) -> bool { + match param_type { + ParamType::Array(inner) | ParamType::FixedArray(inner, _) => { + self.is_tuple_array_type(inner) + } + _ => false, + } + } + + /// Get tuple components. + fn get_tuple_components<'a>(&self, param_type: &'a ParamType) -> Option<&'a [ParamType]> { + match param_type { + ParamType::Tuple(components) => Some(components), + ParamType::Array(inner) | ParamType::FixedArray(inner, _) => { + self.get_tuple_components(inner) + } + _ => None, + } + } + + /// Handle indexed input type conversion. 
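+    ///
+    /// Solidity stores only the keccak256 hash of indexed `string`, `bytes`, array,
+    /// and tuple event parameters in the log topics, so the generated bindings
+    /// expose such parameters as 32-byte values rather than as their declared type.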
+ fn indexed_input_type(&self, param_type: &ParamType) -> ParamType { + // Strings, bytes, and arrays are encoded and hashed to bytes32 + match param_type { + ParamType::String | ParamType::Bytes | ParamType::Tuple(_) => ParamType::FixedBytes(32), + ParamType::Array(_) | ParamType::FixedArray(_, _) => ParamType::FixedBytes(32), + _ => param_type.clone(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn parse_abi(json: &str) -> Contract { + serde_json::from_str(json).unwrap() + } + + #[test] + fn test_simple_event() { + let abi_json = r#"[ + { + "type": "event", + "name": "Transfer", + "inputs": [ + {"name": "from", "type": "address", "indexed": true}, + {"name": "to", "type": "address", "indexed": true}, + {"name": "value", "type": "uint256", "indexed": false} + ], + "anonymous": false + } + ]"#; + + let contract = parse_abi(abi_json); + let gen = AbiCodeGenerator::new(contract, "Token"); + let types = gen.generate_types(); + + assert!(types.iter().any(|c| c.name == "Transfer")); + assert!(types.iter().any(|c| c.name == "Transfer__Params")); + } + + #[test] + fn test_function_with_outputs() { + let abi_json = r#"[ + { + "type": "function", + "name": "balanceOf", + "inputs": [{"name": "owner", "type": "address"}], + "outputs": [{"name": "", "type": "uint256"}], + "stateMutability": "view" + } + ]"#; + + let contract = parse_abi(abi_json); + let gen = AbiCodeGenerator::new(contract, "Token"); + let types = gen.generate_types(); + + assert!(types.iter().any(|c| c.name == "Token")); + let token_class = types.iter().find(|c| c.name == "Token").unwrap(); + + assert!(token_class.methods.iter().any(|m| m.name == "balanceOf")); + assert!(token_class + .methods + .iter() + .any(|m| m.name == "try_balanceOf")); + } + + #[test] + fn test_asc_type_for_ethereum() { + let gen = AbiCodeGenerator::new(Contract::default(), "Test"); + + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Address), "Address"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Bool), "boolean"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Uint(256)), "BigInt"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Uint(8)), "i32"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Int(32)), "i32"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::String), "string"); + assert_eq!(gen.asc_type_for_ethereum(&ParamType::Bytes), "Bytes"); + } + + #[test] + fn test_name_sanitization() { + let gen = AbiCodeGenerator::new(Contract::default(), "Test!Contract@Name"); + assert_eq!(gen.name, "Test_Contract_Name"); + } + + #[test] + fn test_indexed_input_type() { + let gen = AbiCodeGenerator::new(Contract::default(), "Test"); + + assert_eq!( + gen.indexed_input_type(&ParamType::String), + ParamType::FixedBytes(32) + ); + assert_eq!( + gen.indexed_input_type(&ParamType::Bytes), + ParamType::FixedBytes(32) + ); + assert_eq!( + gen.indexed_input_type(&ParamType::Array(Box::new(ParamType::Uint(256)))), + ParamType::FixedBytes(32) + ); + assert_eq!( + gen.indexed_input_type(&ParamType::Address), + ParamType::Address + ); + assert_eq!( + gen.indexed_input_type(&ParamType::Uint(256)), + ParamType::Uint(256) + ); + } +} diff --git a/gnd/src/codegen/mod.rs b/gnd/src/codegen/mod.rs index a014e83304e..1f1951eb817 100644 --- a/gnd/src/codegen/mod.rs +++ b/gnd/src/codegen/mod.rs @@ -5,10 +5,12 @@ //! - Contract ABIs (event and call bindings) //! 
- Data source templates
 
+mod abi;
 mod schema;
 mod types;
 mod typescript;
 
+pub use abi::AbiCodeGenerator;
 pub use schema::SchemaCodeGenerator;
 pub use typescript::{
     ArrayType, Class, ClassMember, Method, ModuleImports, NamedType, NullableType, Param,

From e847350e7cf446908c49dce1236162206f328912 Mon Sep 17 00:00:00 2001
From: David Lutterkort
Date: Sun, 18 Jan 2026 21:09:38 -0800
Subject: [PATCH 14/60] gnd: Add template code generator for dynamic data
 sources

Implements codegen/template.rs that generates AssemblyScript classes for
subgraph data source templates:

- Template class extending DataSourceTemplate
- Static create() method for creating new data sources
- Static createWithContext() method with context parameter
- Support for Ethereum (Address param) and file (cid param) templates

Co-Authored-By: Claude Opus 4.5
---
 gnd/src/codegen/mod.rs      |   2 +
 gnd/src/codegen/template.rs | 285 ++++++++++++++++++++++++++++++++++++
 2 files changed, 287 insertions(+)
 create mode 100644 gnd/src/codegen/template.rs

diff --git a/gnd/src/codegen/mod.rs b/gnd/src/codegen/mod.rs
index 1f1951eb817..3b300e5eb94 100644
--- a/gnd/src/codegen/mod.rs
+++ b/gnd/src/codegen/mod.rs
@@ -7,11 +7,13 @@
 
 mod abi;
 mod schema;
+mod template;
 mod types;
 mod typescript;
 
 pub use abi::AbiCodeGenerator;
 pub use schema::SchemaCodeGenerator;
+pub use template::{Template, TemplateCodeGenerator, TemplateKind};
 pub use typescript::{
     ArrayType, Class, ClassMember, Method, ModuleImports, NamedType, NullableType, Param,
     StaticMethod, GENERATED_FILE_NOTE,
diff --git a/gnd/src/codegen/template.rs b/gnd/src/codegen/template.rs
new file mode 100644
index 00000000000..d067ea928c2
--- /dev/null
+++ b/gnd/src/codegen/template.rs
@@ -0,0 +1,285 @@
+//! Data source template code generation.
+//!
+//! Generates AssemblyScript classes for subgraph templates that allow
+//! dynamic data source creation at runtime.
+
+use super::typescript::{self as ts, Class, ModuleImports, Param, StaticMethod};
+
+/// The kind of a data source template.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum TemplateKind {
+    /// Ethereum contract template
+    Ethereum,
+    /// IPFS file template
+    FileIpfs,
+    /// Arweave file template
+    FileArweave,
+}
+
+impl TemplateKind {
+    /// Parse a template kind from a string (e.g., "ethereum/contract", "file/ipfs").
+    pub fn from_str_kind(kind: &str) -> Option<Self> {
+        match kind {
+            "ethereum/contract" | "ethereum" => Some(TemplateKind::Ethereum),
+            "file/ipfs" => Some(TemplateKind::FileIpfs),
+            "file/arweave" => Some(TemplateKind::FileArweave),
+            _ => None,
+        }
+    }
+}
+
+/// A data source template from the subgraph manifest.
+pub struct Template {
+    /// The name of the template.
+    pub name: String,
+    /// The kind of template.
+    pub kind: TemplateKind,
+}
+
+impl Template {
+    /// Create a new template.
+    pub fn new(name: impl Into<String>, kind: TemplateKind) -> Self {
+        Self {
+            name: name.into(),
+            kind,
+        }
+    }
+}
+
+const GRAPH_TS_MODULE: &str = "@graphprotocol/graph-ts";
+
+/// Template code generator.
+pub struct TemplateCodeGenerator {
+    templates: Vec