diff --git a/.cargo/config.toml b/.cargo/config.toml index abb445a64c5c..452a53d2fe39 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -10,7 +10,7 @@ incremental = true # TODO(aatifsyed): remove - this can be pushed out to readme # In all cases, pass --cfg=tokio_unstable for tokio console integration # See (https://github.com/ChainSafe/forest/pull/2245) -# Note that this may be overriden by user configuration at ~/.cargo/config.toml +# Note that this may be overridden by user configuration at ~/.cargo/config.toml rustflags = ["--cfg=tokio_unstable"] [net] diff --git a/.config/forest.dic b/.config/forest.dic index e503ebf36b94..8767a6198899 100644 --- a/.config/forest.dic +++ b/.config/forest.dic @@ -1,83 +1,106 @@ -73 +237 Algorand/M -API/M APIs -args -arities arity -async attoFIL +autogenerated +automagically +backend +backfilling benchmark/GD +benchmarking bitfield bitswap BLAKE2b blockchain/M blockstore/SM BLS +bootstrapper/SM butterflynet calibnet calldata callee canonicalization CAR/SM -CARv1/SM -CARv2/SM CBOR -CID/SM -CIDs ChainSafe/M +changelog +CID/SM CLI -Cloudflare clonable +Cloudflare codebase codec cron crypto -CurrentEpoch +Curio daemon daemonize Datacap -devnet -DB/S deserialize/D destructuring +devnet +DevOps +Devs +DHT +Diataxis +Diátaxis +DigitalOcean DNS -durations +Dockerfile/SM +Docusaurus +draggable +Drand +Drilldown/SM +Drilldowns +eg EIP -EIP155 -enum -Enum -EOF -Ethereum +entrypoint/SM eth -exa +Ethereum EVM -f4 -F3 FFI FIL Filecoin/M +FilecoinEC Filops +FilOz FIP +FIPs +followable +fortnightly FVM GC GiB +GitHub +Glif +Grafana +GraphQL HAMT -hasher +hardcoded healthcheck -implementor/SM +hotfix +implementer/SM +infeasible +interop/SM +IP IPFS -ip IPLD JSON -JWT +jwt +JWTs Kademlia +keypair/SM +keystore/SM Kubernetes libp2p liveness localhost +LRU +macOS mainnet MDNS +MDX mempool Merkle MiB @@ -85,62 +108,74 @@ middleware milliGas multiaddr/SM multihash +multiline multisig -mutex -Open +namespace/SM +NVMe +onwards OpenRPC 
overallocation -P2P +p2p param/SM -ParityDb ParityDB parsable +peerstore/SM performant PoC pointer/SM PoSt -precommit -R2 +pre-commit +preloaded +pubsub +README +reimplemented +repo/SM RLP +RocksDB RPC schema/SM -schemas -SECP SECP256k1 seekable serializable serializer/SM +sharded skippable statediff stateful +stateroot/SM stderr stdout -struct/SM +subcommand/SM +swappiness synchronizer syscall/S +Tabs TCP testnet +theme/SM tipset/SM tipsetkey/S TOML +toolchain/SM trie -truthy -TTY +UI/SM uncompress unrepresentable untrusted URL UUID -v0 -v1 -v2 validator/S varint +vendored verifier verifreg VM/SM +VPS VRF -WebAssembly +wasm +wasmtime +webapp WebSocket -WPoStProvingPeriodDeadlines +WIP +YugabyteDB zstd diff --git a/.config/spellcheck.toml b/.config/spellcheck.toml index b7de9b332d93..1e536bf9e8c4 100644 --- a/.config/spellcheck.toml +++ b/.config/spellcheck.toml @@ -6,13 +6,51 @@ lang = "en_US" search_dirs = ["."] skip_os_lookups = true use_builtin = true -tokenization_splitchars = "\",;:.!?#(){}[]|/_-‒'`&@§¶…<>=" +tokenization_splitchars = "\",;:.!?#()[]|/_-‒'`&@§¶…<>=~" extra_dictionaries = ["forest.dic", "en_US.dic"] [hunspell.quirks] transform_regex = [ # 10.7% "^[0-9_]+(?:\\.[0-9]*)?%$", + # Template variables like {{ env.WORKFLOW_URL }} or {{ date | date('D/M/YY HH:mm') }} + "\\{\\{.*?\\}\\}", + # Domain names ending in .io (including in markdown links) + "^[A-Za-z0-9.-]+\\.io$", + "\\.io$", + # Double braces + "^\\{\\{$", + "^\\}\\}$", + # YAML frontmatter - skip underscore_words common in config + "^[a-z]+_[a-z]+$", + # Numbers with underscores like 30_000 + "^[0-9_]+$", + # Plus signs and other operators + "^[+\\-*/=]+$", + # Tilde symbol + "^~+$", + # Import statements like @theme/Tabs, TabItem + "@theme/.*", + "^TabItem$", + # Emoji sequences (literal emojis) + "^[🌲🪷🐳❤️🐋⚠️]+$", + # Network versions and test networks + "^NV[0-9X]+$", + "^[0-9]+k$", + # File extensions in names + "^[a-z_]+\\.yaml$", + "^[a-z_]+\\.yml$", + "\\.yaml$", + "\\.yml$", + # Date 
format parts in templates + "^YY$", + "^HH$", + "^mm$", + # File extension standalone tokens + "^yaml$", + "^yml$", + # Domain TLD standalone tokens + "^io$", ] allow_concatenation = false allow_dashes = false diff --git a/.github/CARGO_ADVISORIES_ISSUE_TEMPLATE.md b/.github/CARGO_ADVISORIES_ISSUE_TEMPLATE.md index 5da859ce91d0..5fff099ec53f 100644 --- a/.github/CARGO_ADVISORIES_ISSUE_TEMPLATE.md +++ b/.github/CARGO_ADVISORIES_ISSUE_TEMPLATE.md @@ -5,4 +5,4 @@ labels: ["Bug"] ## Description -Please [check the logs]({{ env.WORKFLOW_URL }}) for more information. +Please [check the logs]({{ `env.WORKFLOW_URL` }}) for more information. diff --git a/.github/DOCKER_ISSUE_TEMPLATE.md b/.github/DOCKER_ISSUE_TEMPLATE.md index a2943b028944..4141c34dec02 100644 --- a/.github/DOCKER_ISSUE_TEMPLATE.md +++ b/.github/DOCKER_ISSUE_TEMPLATE.md @@ -5,4 +5,4 @@ labels: ["Bug"] ## Description -Latest Docker check failed. Please [check the logs]({{ env.WORKFLOW_URL }}) for more information. +Latest Docker check failed. Please [check the logs]({{ `env.WORKFLOW_URL` }}) for more information. diff --git a/.github/INTEGRATION_TESTS_ISSUE_TEMPLATE.md b/.github/INTEGRATION_TESTS_ISSUE_TEMPLATE.md index 6588f4587dc1..55fcf4867227 100644 --- a/.github/INTEGRATION_TESTS_ISSUE_TEMPLATE.md +++ b/.github/INTEGRATION_TESTS_ISSUE_TEMPLATE.md @@ -5,4 +5,4 @@ labels: ["Bug"] ## Description -Integration tests failed. Please [check the logs]({{ env.WORKFLOW_URL }}) for more information. +Integration tests failed. Please [check the logs]({{ `env.WORKFLOW_URL` }}) for more information. 
diff --git a/.github/ISSUE_TEMPLATE/1-bug_report.md b/.github/ISSUE_TEMPLATE/1-bug_report.md index c85e33093950..ef614486153f 100644 --- a/.github/ISSUE_TEMPLATE/1-bug_report.md +++ b/.github/ISSUE_TEMPLATE/1-bug_report.md @@ -1,6 +1,6 @@ --- name: Bug report -about: Provide a report of unexpected behaviour +about: Provide a report of unexpected behavior title: "" labels: "Type: Bug" assignees: "" @@ -12,7 +12,7 @@ assignees: "" ## To reproduce - + 1. Go to '...' 2. Run '....' @@ -29,7 +29,7 @@ assignees: "" ``` -## Expected behaviour +## Expected behavior diff --git a/.github/ISSUE_TEMPLATE/6-network-upgrade.md b/.github/ISSUE_TEMPLATE/6-network-upgrade.md index 6025da16f37a..2b91852618af 100644 --- a/.github/ISSUE_TEMPLATE/6-network-upgrade.md +++ b/.github/ISSUE_TEMPLATE/6-network-upgrade.md @@ -5,9 +5,9 @@ title: "NVXX Tracking" labels: "Type: Epic" --- - + -# Tracking issue for the NVXX network upgrade +# Tracking issue for the `NVXX` network upgrade diff --git a/.github/RPC_PARITY_ISSUE_TEMPLATE.md b/.github/RPC_PARITY_ISSUE_TEMPLATE.md index cbff9a8bbb32..67b7a04298c8 100644 --- a/.github/RPC_PARITY_ISSUE_TEMPLATE.md +++ b/.github/RPC_PARITY_ISSUE_TEMPLATE.md @@ -5,4 +5,4 @@ labels: ["Bug"] ## Description -Latest RPC parity test failed. Please [check the logs]({{ env.WORKFLOW_URL }}) for more information. +Latest RPC parity test failed. Please [check the logs]({{ `env.WORKFLOW_URL` }}) for more information. diff --git a/.github/SNAPSHOT_PARITY_ISSUE_TEMPLATE.md b/.github/SNAPSHOT_PARITY_ISSUE_TEMPLATE.md index f1e3f59f6b66..5bdc83725dbf 100644 --- a/.github/SNAPSHOT_PARITY_ISSUE_TEMPLATE.md +++ b/.github/SNAPSHOT_PARITY_ISSUE_TEMPLATE.md @@ -5,4 +5,4 @@ labels: ["Bug"] ## Description -Latest snapshot parity test failed. Please [check the logs]({{ env.WORKFLOW_URL }}) for more information. +Latest snapshot parity test failed. Please [check the logs]({{ `env.WORKFLOW_URL` }}) for more information. 
diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml index f65cb147129e..ba1aebaab262 100644 --- a/.github/workflows/docs-check.yml +++ b/.github/workflows/docs-check.yml @@ -36,9 +36,7 @@ jobs: node-version: 20 package-manager-cache: false - run: corepack enable - - run: make format-spellcheck-dictionary-check - run: yarn --immutable - run: yarn typecheck - - run: yarn spellcheck - run: yarn format-check - run: yarn build diff --git a/CHANGELOG.md b/CHANGELOG.md index bca2f1687495..13393f727a63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -127,6 +127,8 @@ Mandatory release for calibration network node operators. It includes the NV27 _ - [#6006](https://github.com/ChainSafe/forest/issues/6006) More strict checks for the address arguments in the `forest-cli` subcommands. +- [#5958](https://github.com/ChainSafe/forest/issues/5958): Extended cargo-spellcheck coverage to include .github directory for repository-wide markdown spellchecking. + - [#5897](https://github.com/ChainSafe/forest/issues/5987) Added support for the NV27 _Golden Week_ network upgrade for devnets. - [#5897](https://github.com/ChainSafe/forest/issues/5987) Added support for the NV27 _Golden Week_ network upgrade for calibration network. The upgrade epoch is set to `3_007_294` (Wed 10 Sep 23:00:00 UTC 2025). @@ -155,11 +157,11 @@ Non-mandatory release. It introduces a couple of features around snapshot genera ### Added -- [#5835](https://github.com/ChainSafe/forest/issues/5835) Add `--format` flag to the `forest-cli snapshot export` subcommand. This allows exporting a Filecoin snapshot in v2 format(FRC-0108). +- [#5835](https://github.com/ChainSafe/forest/issues/5835) Add `--format` flag to the `forest-cli snapshot export` subcommand. This allows exporting a Filecoin snapshot in `v2` format(FRC-0108). - [#5956](https://github.com/ChainSafe/forest/pull/5956) Add `forest-tool archive f3-header` subcommand for inspecting the header of a standalone F3 snapshot(FRC-0108). 
-- [#5835](https://github.com/ChainSafe/forest/issues/5835) Add `forest-tool archive metadata` subcommand for inspecting snapshot metadata of a Filecoin snapshot in v2 format(FRC-0108). +- [#5835](https://github.com/ChainSafe/forest/issues/5835) Add `forest-tool archive metadata` subcommand for inspecting snapshot metadata of a Filecoin snapshot in `v2` format(FRC-0108). - [#5859](https://github.com/ChainSafe/forest/pull/5859) Added size metrics for zstd frame cache and made max size configurable via `FOREST_ZSTD_FRAME_CACHE_DEFAULT_MAX_SIZE` environment variable. @@ -171,7 +173,7 @@ Non-mandatory release. It introduces a couple of features around snapshot genera - [#5946](https://github.com/ChainSafe/forest/pull/5946) Added `--verbose` to `forest-cli state compute` for printing epochs and tipset keys along with state roots. -- [#5886](https://github.com/ChainSafe/forest/issues/5886) Add `forest-tool archive merge-f3` subcommand for merging a v1 Filecoin snapshot and an F3 snapshot into a v2 Filecoin snapshot. +- [#5886](https://github.com/ChainSafe/forest/issues/5886) Add `forest-tool archive merge-f3` subcommand for merging a `v1` Filecoin snapshot and an F3 snapshot into a `v2` Filecoin snapshot. - [#4976](https://github.com/ChainSafe/forest/issues/4976) Add support for the `Filecoin.EthSubscribe` and `Filecoin.EthUnsubscribe` API methods to enable subscriptions to Ethereum event types: `heads` and `logs`. @@ -354,7 +356,7 @@ This is a mandatory release for calibnet node operators. It includes the revised ### Added -- [#5375](https://github.com/ChainSafe/forest/issues/5375) Add an RNG wrapper that that can be overriden by a reproducible seeded RNG. +- [#5375](https://github.com/ChainSafe/forest/issues/5375) Add an RNG wrapper that can be overridden by a reproducible seeded RNG. - [#5386](https://github.com/ChainSafe/forest/pull/5386) Add support for the `Filecoin.EthTraceTransaction` RPC method. @@ -709,7 +711,7 @@ improvements. 
Be sure to check the breaking changes before upgrading. copying them if not applicable. - [#4768](https://github.com/ChainSafe/forest/pull/4768) Moved all RPC methods - to V1 when applicabile + to V1 when applicable ### Added @@ -885,7 +887,7 @@ most notably, garbage collection fix. ### Added - [#4473](https://github.com/ChainSafe/forest/pull/4473) Add support for NV23 - _Waffle_ network upgrade (FIP-0085, FIP-0091, v14 actors). + _Waffle_ network upgrade (FIP-0085, FIP-0091, `v14` actors). - [#4352](https://github.com/ChainSafe/forest/pull/4352) Add support for the `Filecoin.StateGetClaim` RPC method. @@ -1062,7 +1064,7 @@ details. - [#4029](https://github.com/ChainSafe/forest/pull/4029) Add `forest-tool shed private-key-from-key-pair` and - `forest-tool shed key-pair-from-private-key` commands. These facilate moving + `forest-tool shed key-pair-from-private-key` commands. These facilitate moving between Forest and Lotus without losing the peer-to-peer identity. - [#4052](https://github.com/ChainSafe/forest/pull/4052) Add @@ -1703,7 +1705,7 @@ Notable updates: - [#2796](https://github.com/ChainSafe/forest/pull/2796): Fix issue when running Forest on calibnet using a configuration file only. - [#2807](https://github.com/ChainSafe/forest/pull/2807): Fix issue with v11 - actor CIDs. + actor `CIDs`. - [#2804](https://github.com/ChainSafe/forest/pull/2804): Add work around for FVM bug that caused `forest-cli sync wait` to fail. @@ -1850,7 +1852,7 @@ Notable updates: [#2404](https://github.com/ChainSafe/forest/issues/2404) - bitswap queries cancellation that do not respond after a period. [#2398](https://github.com/ChainSafe/forest/issues/2398) -- Forest daeamon crashing on sending bitswap requests. +- Forest daemon crashing on sending bitswap requests. [#2405](https://github.com/ChainSafe/forest/issues/2405) - Corrected counts displayed when using `forest-cli --chain sync wait`. 
[#2429](https://github.com/ChainSafe/forest/issues/2429) @@ -1942,7 +1944,7 @@ Notable updates: - Fat snapshots (snapshots that contain all transaction receipts since genesis) have been deprecated in favor of slim snapshots where receipts are downloaded on demand. -- All security advistory exceptions. Forest's dependencies are now free of known +- All security advisory exceptions. Forest's dependencies are now free of known vulnerabilities. ## Forest v0.4.1 (2022-10-04) @@ -1993,7 +1995,7 @@ Notable updates: - Resolve two security concerns by removing legacy code (RUSTSEC-2020-0071 and RUSTSEC-2021-0130). - Fixed Docker image and released it to GH container registry. -- Network selection (ie mainnet vs testnet) moved to a CLI flag rather than a +- Network selection (i.e. mainnet vs testnet) moved to a CLI flag rather than a compile-time flag. ## Forest v0.2.2 _alpha_ (2022-04-06) @@ -2198,54 +2200,54 @@ All changes: - Fix snapshot get in docs ([#1353](https://github.com/ChainSafe/forest/pull/1353)) - Fix market logic ([#1356](https://github.com/ChainSafe/forest/pull/1356)) -- V6: fix market and power actors to match go +- `V6`: fix market and power actors to match go ([#1348](https://github.com/ChainSafe/forest/pull/1348)) -- F28 fix ([#1343](https://github.com/ChainSafe/forest/pull/1343)) -- Fix: F25 ([#1342](https://github.com/ChainSafe/forest/pull/1342)) +- `F28` fix ([#1343](https://github.com/ChainSafe/forest/pull/1343)) +- Fix: `F25` ([#1342](https://github.com/ChainSafe/forest/pull/1342)) - Ci: --ignore RUSTSEC-2021-0130 ([#1350](https://github.com/ChainSafe/forest/pull/1350)) -- Drand v14 update: fix fetching around null tipsets +- Drand `v14` update: fix fetching around null tipsets ([#1339](https://github.com/ChainSafe/forest/pull/1339)) -- Fix v6 market actor bug +- Fix `v6` market actor bug ([#1341](https://github.com/ChainSafe/forest/pull/1341)) -- F27 fix ([#1328](https://github.com/ChainSafe/forest/pull/1328)) -- F17 fix 
([#1324](https://github.com/ChainSafe/forest/pull/1324)) +- `F27` fix ([#1328](https://github.com/ChainSafe/forest/pull/1328)) +- `F17` fix ([#1324](https://github.com/ChainSafe/forest/pull/1324)) - Laudiacay/actors review f23 ([#1325](https://github.com/ChainSafe/forest/pull/1325)) - Fix market actor publish_storage_deals ([#1327](https://github.com/ChainSafe/forest/pull/1327)) - Remove .swp ([#1326](https://github.com/ChainSafe/forest/pull/1326)) -- F24 fix ([#1323](https://github.com/ChainSafe/forest/pull/1323)) -- F9 fix ([#1315](https://github.com/ChainSafe/forest/pull/1315)) -- F20: Fix expiration set validation order +- `F24` fix ([#1323](https://github.com/ChainSafe/forest/pull/1323)) +- `F9` fix ([#1315](https://github.com/ChainSafe/forest/pull/1315)) +- `F20`: Fix expiration set validation order ([#1322](https://github.com/ChainSafe/forest/pull/1322)) -- F13 fix ([#1313](https://github.com/ChainSafe/forest/pull/1313)) -- F21 fix ([#1311](https://github.com/ChainSafe/forest/pull/1311)) -- F11 fix ([#1312](https://github.com/ChainSafe/forest/pull/1312)) -- F15 fix ([#1314](https://github.com/ChainSafe/forest/pull/1314)) -- F18, F19 fix ([#1321](https://github.com/ChainSafe/forest/pull/1321)) -- Nv14: implement v6 actors +- `F13` fix ([#1313](https://github.com/ChainSafe/forest/pull/1313)) +- `F21` fix ([#1311](https://github.com/ChainSafe/forest/pull/1311)) +- `F11` fix ([#1312](https://github.com/ChainSafe/forest/pull/1312)) +- `F15` fix ([#1314](https://github.com/ChainSafe/forest/pull/1314)) +- `F18`, `F19` fix ([#1321](https://github.com/ChainSafe/forest/pull/1321)) +- Nv14: implement `v6` actors ([#1260](https://github.com/ChainSafe/forest/pull/1260)) - Add to troubleshooting docs ([#1282](https://github.com/ChainSafe/forest/pull/1282)) -- F12 fix ([#1290](https://github.com/ChainSafe/forest/pull/1290)) -- F1 fix ([#1293](https://github.com/ChainSafe/forest/pull/1293)) -- F16: Fix improper use of assert macro +- `F12` fix 
([#1290](https://github.com/ChainSafe/forest/pull/1290)) +- `F1` fix ([#1293](https://github.com/ChainSafe/forest/pull/1293)) +- `F16`: Fix improper use of assert macro ([#1310](https://github.com/ChainSafe/forest/pull/1310)) -- F14: Fix missing continue statement +- `F14`: Fix missing continue statement ([#1309](https://github.com/ChainSafe/forest/pull/1309)) -- F10 fix ([#1308](https://github.com/ChainSafe/forest/pull/1308)) -- F7: Fix incorrect error codes +- `F10` fix ([#1308](https://github.com/ChainSafe/forest/pull/1308)) +- `F7`: Fix incorrect error codes ([#1297](https://github.com/ChainSafe/forest/pull/1297)) -- F8: Add missing decrement for miner_count +- `F8`: Add missing decrement for miner_count ([#1298](https://github.com/ChainSafe/forest/pull/1298)) -- F6: Fix incorrect error code +- `F6`: Fix incorrect error code ([#1296](https://github.com/ChainSafe/forest/pull/1296)) -- F5: Fix proposal check in market actor +- `F5`: Fix proposal check in market actor ([#1295](https://github.com/ChainSafe/forest/pull/1295)) - Remove redundant validation code and update error message to be same as in spec actors ([#1294](https://github.com/ChainSafe/forest/pull/1294)) -- F3: fix logic to be the same as in the spec actors +- `F3`: fix logic to be the same as in the spec actors ([#1292](https://github.com/ChainSafe/forest/pull/1292)) - Attempt to improve gh actions time ([#1319](https://github.com/ChainSafe/forest/pull/1319)) @@ -2253,7 +2255,7 @@ All changes: ([#1316](https://github.com/ChainSafe/forest/pull/1316)) - Ci: add gh actions workflows ([#1317](https://github.com/ChainSafe/forest/pull/1317)) -- Fix: audit issue F2 ([#1289](https://github.com/ChainSafe/forest/pull/1289)) +- Fix: audit issue `F2` ([#1289](https://github.com/ChainSafe/forest/pull/1289)) - Update codeowners ([#1306](https://github.com/ChainSafe/forest/pull/1306)) - Add Guillaume to code owners ([#1283](https://github.com/ChainSafe/forest/pull/1283)) @@ -2403,7 +2405,7 @@ All initial change 
sets: (Eric Tu) - `4047ff5e` 3 -> 4 ([#1153](https://github.com/ChainSafe/forest/pull/1153)) (Eric Tu) -- `446bea40` Swap to asyncronous_codec and bump futures_cbor_codec +- `446bea40` Swap to asynchronous_codec and bump futures_cbor_codec ([#1163](https://github.com/ChainSafe/forest/pull/1163)) (Eric Tu) - `e4e6711b` Encrypted keystore now defaults to enabled. Warn the user if using an unencrypted keystore. @@ -2838,7 +2840,7 @@ All initial change sets: ([#783](https://github.com/ChainSafe/forest/pull/783)) (Austin Abell) - `7743da7e` Fix projection period for faults ([#784](https://github.com/ChainSafe/forest/pull/784)) (Austin Abell) -- `fb2ca2be` Build and Api Versoining +- `fb2ca2be` Build and Api Versioning ([#752](https://github.com/ChainSafe/forest/pull/752)) (Purple Hair Rust Bard) - `aa397491` Fix get_sectors_for_winning_post and cleanup ([#781](https://github.com/ChainSafe/forest/pull/781)) (Austin Abell) @@ -3421,7 +3423,7 @@ All initial change sets: ([#229](https://github.com/ChainSafe/forest/pull/229)) (Eric Tu) - `47dfb47c` Update multibase dependency for lowercase base32 support ([#239](https://github.com/ChainSafe/forest/pull/239)) (Austin Abell) -- `39a8d88e` Allow Address network prefix to be overriden for printing +- `39a8d88e` Allow Address network prefix to be overridden for printing ([#233](https://github.com/ChainSafe/forest/pull/233)) (Austin Abell) - `faa71386` Refactor SyncManager to have ownership over tipsets ([#238](https://github.com/ChainSafe/forest/pull/238)) (Austin Abell) @@ -3533,7 +3535,7 @@ All initial change sets: ([#134](https://github.com/ChainSafe/forest/pull/134)) (Austin Abell) - `eace8d81` Storage Power Actor framework ([#129](https://github.com/ChainSafe/forest/pull/129)) (Austin Abell) -- `ede60e7b` Naive DB + Rocksdb implemenation +- `ede60e7b` Naive DB + Rocksdb implementation ([#125](https://github.com/ChainSafe/forest/pull/125)) (Gregory Markou) - `957d0529` Implement BlockHeader builder pattern 
([#124](https://github.com/ChainSafe/forest/pull/124)) (Austin Abell) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b7b51c843615..38efc6c27b23 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -112,7 +112,7 @@ steps in advance to help us fix any potential bug as fast as possible. We use GitHub issues to track bugs and errors. If you run into an issue with the project: -- Open an +- Open a [bug report](https://github.com/ChainSafe/forest/issues/new?assignees=&labels=Type%3A+Bug&projects=&template=1-bug_report.md&title=). - Explain the behavior you would expect and the actual behavior. - Please provide as much context as possible and describe the _reproduction diff --git a/Makefile b/Makefile index 1bcba1f4610c..ea9eff31584c 100644 --- a/Makefile +++ b/Makefile @@ -57,6 +57,7 @@ deny: spellcheck: cargo spellcheck --code 1 || (echo "See .config/spellcheck.md for tips"; false) + find . \( -path "./documentation" -o -path "./node_modules" -o -path "./.git" -o -path "./target" \) -prune -o -name "*.md" -type f -print0 | xargs -0 -r cargo spellcheck --code 1 --cfg .config/spellcheck.toml || (echo "See .config/spellcheck.md for tips"; false) lint: license clean lint-clippy cargo fmt --all --check diff --git a/README.md b/README.md index d16c5dfd017a..5695390b13cc 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ Install [Go](https://go.dev/doc/install) sudo apt install build-essential clang ``` -### Archlinux +### Arch Linux ``` sudo pacman -S base-devel clang diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 306eeeb4f24c..000000000000 --- a/docs/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -SPELLCHECK_DICTIONARY=./dictionary.txt -format-spellcheck-dictionary: - @cat $(SPELLCHECK_DICTIONARY) | sort --ignore-case | uniq > $(SPELLCHECK_DICTIONARY).tmp - @mv $(SPELLCHECK_DICTIONARY).tmp $(SPELLCHECK_DICTIONARY) - -format-spellcheck-dictionary-check: - @cat $(SPELLCHECK_DICTIONARY) | sort --ignore-case | uniq > $(SPELLCHECK_DICTIONARY).tmp - 
@diff $(SPELLCHECK_DICTIONARY) $(SPELLCHECK_DICTIONARY).tmp - @rm $(SPELLCHECK_DICTIONARY).tmp diff --git a/docs/README.md b/docs/README.md index 276381bd1740..bd5e963c1fb8 100644 --- a/docs/README.md +++ b/docs/README.md @@ -44,7 +44,7 @@ yarn typecheck # Validate typescript files ### Deployment -The documentation site is continuously deployed to CloudFlare Pages, triggered on every commit to `main`. [This workflow](/.github/workflows/docs-deploy.yml) defines the deployment process. +The documentation site is continuously deployed to Cloudflare Pages, triggered on every commit to `main`. [This workflow](/.github/workflows/docs-deploy.yml) defines the deployment process. ## Site Structure diff --git a/docs/dictionary.txt b/docs/dictionary.txt deleted file mode 100644 index b8386f2902ea..000000000000 --- a/docs/dictionary.txt +++ /dev/null @@ -1,122 +0,0 @@ -2k -APIs -backend -benchmarking -blockstore -BLS -Butterflynet -Calibnet -calibnet -calibnet-related -cardinality -ChainSafe -chainsafe -ChainSafe's -changelog -CIDs -CLI -cli -Cloudflare -codebase -config -Datacap -datacap -devnet -Devops -Devs -DHT -DigitalOcean -Drand -enums -Ethereum -F3 -f3 -f3-sidecar -FFI -FIL -fil -FIL-RetroPGF -Filecoin -filecoin-project -Filfox -FilOz -FIP -FIPs -FVM -GC -GiB -Github -Grafana -hardcoded -hotfix -Hypercerts -ie. 
-Implementers -implementers -io -IPFS -JSON -JSON-RPC -JWT -JWTs -keypair -keystore -Kubernetes -kubernetes -libp2p -Linux -Liveness -liveness -localhost -localhost's -LRU -M1 -M2 -macOS -Mainnet -mainnet -multiaddress -namespace -NetworkEvents -NV22 -NV23 -NV24 -NVMe -onwards -orchestrator -Organisation -P2P -p2p -performant -pre-compiled -preload -preloaded -pubsub -Q4 -README -RNG -Roadmap -roadmap -RPC -rustup -S3-compatible -SecP256k1 -semver -serverless -stateroots -struct -subcommands -swappiness -SyncStateMachine -TabItem -TBD -Terraform -testnet -Tipset -tipset -tipsets -V0 -V1 -VPS -WIP -zstd diff --git a/docs/docs/developers/deep_dives/chain_follower.md b/docs/docs/developers/deep_dives/chain_follower.md index 98bd799a987c..5f85ef9387d2 100644 --- a/docs/docs/developers/deep_dives/chain_follower.md +++ b/docs/docs/developers/deep_dives/chain_follower.md @@ -11,9 +11,9 @@ The `ChainFollower` is the orchestrator of the chain synchronization process in - Managing a pool of candidate tipsets that are potential heads of the chain. - Scheduling tasks to either fetch missing parent tipsets or validate tipsets whose parents are already known. - Executing intensive validation logic to ensure the integrity of each block and its messages. -- Updating the `ChainStore` struct with newly validated tipsets, which may involve changing the node's view of the heaviest chain (the "head"). +- Updating the `ChainStore` `struct` with newly validated tipsets, which may involve changing the node's view of the heaviest chain (the "head"). -This entire process is managed by a state machine within the [chain_follower.rs](https://github.com/ChainSafe/forest/blob/main/src/chain_sync/chain_follower.rs) module, ensuring that tipsets are processed in the correct order and that the node can handle multiple competing forks simultaneously. 
+This entire process is managed by a state machine within the [`chain_follower.rs`](https://github.com/ChainSafe/forest/blob/main/src/chain_sync/chain_follower.rs) module, ensuring that tipsets are processed in the correct order and that the node can handle multiple competing forks simultaneously. ## Visual Workflow @@ -79,11 +79,11 @@ graph TD ## `ChainFollower` Working -The `ChainFollower` struct spawns 4 concurrent tasks to sync the chain and track its progress: +The `ChainFollower` `struct` spawns 4 concurrent tasks to sync the chain and track its progress: -1. **Forward tipsets from peers to the SyncStateMachine**: Listens for [NetworkEvents](https://github.com/ChainSafe/forest/blob/main/src/libp2p/service.rs), processes incoming blocks from gossip, fetches the `FullTipset` if necessary, and submits it to the state machine. +1. **Forward tipsets from peers to the `SyncStateMachine`**: Listens for [`NetworkEvents`](https://github.com/ChainSafe/forest/blob/main/src/libp2p/service.rs), processes incoming blocks from gossip, fetches the `FullTipset` if necessary, and submits it to the state machine. 2. **Forward tipsets from miners to the `SyncStateMachine`**: Listens on a dedicated channel for locally-produced tipsets submitted via the API. -3. **Execute `SyncStateMachine` tasks**: Manages the main event loop, taking tasks generated by the `SyncStateMachine` struct (like fetching or validating) and spawning them for execution. It also updates the node's overall sync status. +3. **Execute `SyncStateMachine` tasks**: Manages the main event loop, taking tasks generated by the `SyncStateMachine` `struct` (like fetching or validating) and spawning them for execution. It also updates the node's overall sync status. 4. **Periodically report sync progress**: Logs the current sync status at regular intervals, providing visibility into how far behind the network head the node is. 
## Details of `ChainFollower` working @@ -94,23 +94,23 @@ New tipsets are introduced to the `ChainFollower` from two main sources: - **P2P Network (Gossip):** - **File:** `src/libp2p/service.rs` - - **Flow:** Forest nodes listen on the `/fil/blocks` pubsub topic. When a peer broadcasts a new block, the `Libp2pService` struct receives it in the `handle_gossip_event` function. This event is just for a single block's CID. The `ChainFollower` receives this `NetworkEvent::PubsubMessage` and realizes it needs the full block and its sibling blocks to form a `FullTipset`. It then issues a "chain exchange" request to the network using the `chain_exchange_fts` method of the `SyncNetworkContext` struct (present in `src/chain_sync/network_context.rs`). This is a direct request to a peer to provide the `FullTipset` corresponding to the block's tipset key. + - **Flow:** Forest nodes listen on the `/fil/blocks` pubsub topic. When a peer broadcasts a new block, the `Libp2pService` `struct` receives it in the `handle_gossip_event` function. This event is just for a single block's CID. The `ChainFollower` receives this `NetworkEvent::PubsubMessage` and realizes it needs the full block and its sibling blocks to form a `FullTipset`. It then issues a "chain exchange" request to the network using the `chain_exchange_fts` method of the `SyncNetworkContext` `struct` (present in `src/chain_sync/network_context.rs`). This is a direct request to a peer to provide the `FullTipset` corresponding to the block's tipset key. - **Local Miner:** - A connected miner can submit a newly created `FullTipset` directly to the `ChainFollower` through the `tipset_sender` channel field. This bypasses the network fetching step. ### The `SyncStateMachine` -Once a `FullTipset` struct is acquired, it's handed over to the `SyncStateMachine` struct. This is the core of the chain follower, managing all candidate tipsets and deciding what to do next. 
+Once a `FullTipset` `struct` is acquired, it's handed over to the `SyncStateMachine` `struct`. This is the core of the chain follower, managing all candidate tipsets and deciding what to do next. - **State:** The state machine maintains a `tipsets` field (a `HashMap`) of all tipsets it is currently aware of but has not yet fully validated. -- **`SyncEvent` enum:** The state machine is driven by `SyncEvent` variants: +- **`SyncEvent` `enum`:** The state machine is driven by `SyncEvent` variants: - `NewFullTipsets`: Triggered when a new tipset is discovered. The state machine adds it to its internal `tipsets` map to be processed. - `BadTipset`: Triggered when a tipset fails validation. The state machine will remove it and all its descendants from its internal map. - `ValidatedTipset`: Triggered when a tipset successfully passes validation. The state machine removes it from its map and commits it to the `ChainStore`. -- **`SyncTask` Generation:** The `tasks()` method of the `SyncStateMachine` is its heart. It iterates through the known tipsets, builds out the potential fork chains, and generates the next set of actions (`SyncTask` enums) required to make progress. +- **`SyncTask` Generation:** The `tasks()` method of the `SyncStateMachine` is its heart. It iterates through the known tipsets, builds out the potential fork chains, and generates the next set of actions (`SyncTask` `enums`) required to make progress. - If a tipsets parent is present in the `ChainStore` (meaning it's already validated), a `SyncTask::ValidateTipset` task is created. - If a tipsets parent is not in the `ChainStore`, a `SyncTask::FetchTipset` task is created for the missing parent. This recursive fetching is the important mechanism that allows Forest to sync the chain by walking backward from a given head. 
@@ -118,31 +118,31 @@ Once a `FullTipset` struct is acquired, it's handed over to the `SyncStateMachin When a `SyncTask::ValidateTipset` task is executed, it kicks off a comprehensive validation process defined in the `validate_block` function in `src/chain_sync/tipset_syncer.rs`. This is the most computationally intensive part of chain synchronization. For each `Block` in the `FullTipset`, the following checks are performed in parallel: -1. **Parent Tipset State Execution**: This is the most critical step. The `StateManager` struct loads the parent tipset and re-executes all of its messages to compute the final state root and message receipt root. These computed roots are compared against the `state_root` and `message_receipts` fields in the current block's header. A mismatch indicates an invalid state transition, and the block is rejected. +1. **Parent Tipset State Execution**: This is the most critical step. The `StateManager` `struct` loads the parent tipset and re-executes all of its messages to compute the final state root and message receipt root. These computed roots are compared against the `state_root` and `message_receipts` fields in the current block's header. A mismatch indicates an invalid state transition, and the block is rejected. 2. **Message Validation**: The `check_block_messages` function performs several checks: - The aggregate BLS signature for all BLS messages in the block is verified. - - The individual signature of every SecP256k1 message is verified against the sender's key. + - The individual signature of every `SecP256k1` message is verified against the sender's key. - The `nonce` (sequence number) of each message is checked against the sender's current nonce in the parent state. - The `gas_limit` of all messages is summed to ensure it does not exceed the `BLOCK_GAS_LIMIT`. - - The message root (`TxMeta` struct) is re-computed from all messages and compared to the `messages` CID in the block header. 
+ - The message root (`TxMeta` `struct`) is re-computed from all messages and compared to the `messages` CID in the block header. 3. **Block Signature Verification**: The block header's `signature` is verified to ensure it was signed by the declared `miner_address`. -4. **Consensus Validation**: The `validate_block` method of the `FilecoinConsensus` struct is called to verify consensus-specific rules, primarily the `ElectionProof`. +4. **Consensus Validation**: The `validate_block` method of the `FilecoinConsensus` `struct` is called to verify consensus-specific rules, primarily the `ElectionProof`. ### Handling Bad Blocks When the `SyncStateMachine` receives a `SyncEvent::BadTipset` event, it takes two important actions to protect the node: -1. **Cache the Bad Block:** It adds the CID of every block in the failed tipset to the `BadBlockCache` struct. This is an LRU cache that prevents the node from wasting resources by re-fetching or re-validating a block that is already known to be invalid. (`src/chain_sync/bad_block_cache.rs`) +1. **Cache the Bad Block:** It adds the CID of every block in the failed tipset to the `BadBlockCache` `struct`. This is an LRU cache that prevents the node from wasting resources by re-fetching or re-validating a block that is already known to be invalid. (`src/chain_sync/bad_block_cache.rs`) 2. **Prune Descendants:** It traverses its internal map of tipsets and removes all known descendants of the bad tipset. Since a child of an invalid block is also invalid, this prunes entire invalid forks from the processing queue. ### Committing to the Chain If a tipset and all its blocks pass validation, a `SyncEvent::ValidatedTipset` event is sent to the `SyncStateMachine`, which triggers the final step of committing it to the local chain. (`src/chain/store/chain_store.rs`) -1. **Store the Tipset**: The `SyncStateMachine` calls the `put_tipset` method on the `ChainStore` struct. -2. 
**Expand the Tipset**: The `put_tipset` method first calls the `expand_tipset` method, which checks the `TipsetTracker` struct for any other valid blocks at the same epoch with the same parents. This merges them into a single, more complete tipset, making the view of the head more robust. +1. **Store the Tipset**: The `SyncStateMachine` calls the `put_tipset` method on the `ChainStore` `struct`. +2. **Expand the Tipset**: The `put_tipset` method first calls the `expand_tipset` method, which checks the `TipsetTracker` `struct` for any other valid blocks at the same epoch with the same parents. This merges them into a single, more complete tipset, making the view of the head more robust. 3. **Update the Head**: The new, expanded tipset's weight is compared to the current head's weight in the `update_heaviest` method. If it's heavier, the `set_heaviest_tipset` method of the `ChainStore` is invoked. 4. **Broadcast Head Change**: The `set_heaviest_tipset` method updates the head in the database and broadcasts a `HeadChange::Apply` event. This notification is critical, as it allows other Forest subsystems like the Message Pool and RPC API to update their own state based on the new head of the chain. diff --git a/docs/docs/developers/guides/network_upgrades.md b/docs/docs/developers/guides/network_upgrades.md index 1d042744f443..2d4a9b416cf6 100644 --- a/docs/docs/developers/guides/network_upgrades.md +++ b/docs/docs/developers/guides/network_upgrades.md @@ -13,7 +13,7 @@ The network upgrades rough schedule is published in [Filecoin Core Devs discussi ### Network upgrade scope -The network upgrades scope is published in the [Filecoin Core Devs discussions](https://github.com/filecoin-project/core-devs/discussions) (see the [NV23 scope](https://github.com/filecoin-project/core-devs/discussions/149)). The scope includes the changes in the Filecoin protocol in the form of accepted FIPs.
+The network upgrades scope is published in the [Filecoin Core Devs discussions](https://github.com/filecoin-project/core-devs/discussions) (see the [`NV23` scope](https://github.com/filecoin-project/core-devs/discussions/149)). The scope includes the changes in the Filecoin protocol in the form of accepted FIPs. Some FIPs require changes in the FVM, some (most) in the Builtin Actors, and some in the Forest. Additionally, some changes require state migrations, which may not be trivial to implement and require significant computing resources. @@ -37,7 +37,7 @@ graph TD #### Skeleton with base migration -This provides the base for the state migrations and network-aware node changes. See the sample PR for NV24 [here](https://github.com/ChainSafe/forest/pull/4819). +This provides the base for the state migrations and network-aware node changes. See the sample PR for `NV24` [here](https://github.com/ChainSafe/forest/pull/4819). #### State migrations @@ -55,13 +55,13 @@ Both Forest and the `builtin-actors` repository depend on the FVM. The FVM is up #### `fil-actor-states` update and release -The `fil-actor-states` repository is updated to the latest bundle release. The changes are then released, and the Forest repository is updated to include the new release. Necessary CIDs are updated. The bundle release and the version to be used are coordinated across the Filecoin implementations. +The `fil-actor-states` repository is updated to the latest bundle release. The changes are then released, and the Forest repository is updated to include the new release. Necessary `CIDs` are updated. The bundle release and the version to be used are coordinated across the Filecoin implementations. #### Network-aware node changes Some changes are required in the Forest itself but outside migrations. These changes are usually related to the network upgrade itself, e.g., changes in the block validation rules. 
-#### 2k network testing +#### `2k` network testing The changes are tested locally on the devnet with a Lotus RC and a `builtin-actors` bundle RC. @@ -89,7 +89,7 @@ The steps required to update the Builtin Actors in Forest are as follows: 2. Ensure that the changes are compatible with Forest. If not, prepare a PR ahead of time to address breaking changes. 3. Cut a new release of the `fil-actor-states`. 4. Update the `fil-actor-states` dependency in Forest. -5. Update the CIDs in the [bundle module](https://github.com/ChainSafe/forest/blob/main/src/networks/actors_bundle.rs) to include bundles for any networks that you want to support. +5. Update the `CIDs` in the [bundle module](https://github.com/ChainSafe/forest/blob/main/src/networks/actors_bundle.rs) to include bundles for any networks that you want to support. 6. Update the manifest with `forest-tool state-migration generate-actors-metadata > build/manifest.json`. This will add necessary entries to the manifest. Note that if there were no Rust interface changes, e.g., a re-tag, steps 1-4 can be skipped. @@ -118,7 +118,7 @@ The most crucial part of the network upgrade is coordinating with other Filecoin We communicate the network upgrades via the following channels: -- [Forest Discussions](https://github.com/ChainSafe/forest/discussions). See the [NV23 announcement](https://github.com/ChainSafe/forest/discussions/4488) for an example. +- [Forest Discussions](https://github.com/ChainSafe/forest/discussions). See the [`NV23` announcement](https://github.com/ChainSafe/forest/discussions/4488) for an example. - The `#fil-forest-announcements` channel in the Filecoin Slack. 
## Network upgrade monitoring diff --git a/docs/docs/users/getting_started/install.md b/docs/docs/users/getting_started/install.md index 09ac1184fae3..846590d3dfd3 100644 --- a/docs/docs/users/getting_started/install.md +++ b/docs/docs/users/getting_started/install.md @@ -3,8 +3,8 @@ title: Installing sidebar_position: 2 --- -import Tabs from "@theme/Tabs"; -import TabItem from "@theme/TabItem"; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; @@ -77,7 +77,7 @@ More information about Docker setup and usage can be found in the [Docker docume - Rust compiler (install via [rustup](https://rustup.rs/)) - OS `Base-Devel`/`Build-Essential` - Clang compiler -- Go for building F3 sidecar module +- Go for building `F3` sidecar module For Ubuntu, you can install the dependencies (excluding Rust) with: diff --git a/docs/docs/users/guides/gc.md b/docs/docs/users/guides/gc.md index 7000b975d9bc..a8baf920acdf 100644 --- a/docs/docs/users/guides/gc.md +++ b/docs/docs/users/guides/gc.md @@ -58,7 +58,7 @@ During the GC process, Forest consumes extra RAM and disk space temporarily: - While traversing reachable blocks, it uses 32 bytes of RAM per reachable block. - While exporting a lite snapshot, it uses extra disk space before cleaning up parity-db and stale CAR snapshots. -For a typical ~80 GiB mainnet snapshot, this results in ~2.5 GiB of additional RAM and ~80 GiB disk space usage. +For a typical 80 GiB mainnet snapshot, this results in approximately 2.5 GiB of additional RAM and 80 GiB disk space usage. ### Syncing Pauses or Performance Overheads diff --git a/docs/docs/users/guides/interacting_with_wallets.md b/docs/docs/users/guides/interacting_with_wallets.md index 4a67bdd70c65..71945930093b 100644 --- a/docs/docs/users/guides/interacting_with_wallets.md +++ b/docs/docs/users/guides/interacting_with_wallets.md @@ -9,7 +9,7 @@ The Forest client provides two types of wallets: 1. 
**Local wallet (only accessible by you)**: This wallet is recommended for day-to-day use due to its higher security. Since it is only accessible by you, it minimizes exposure and reduces the likelihood of compromise. -2. **Node wallet (accessible by the Forest node)**: This wallet is managed by the Forest node and is included for backward compatibility with Lotus. It’s less secure as the node may have direct access to it for network operations. This could potentially expose it to unauthorized access or other network-related vulnerabilities. +2. **Node wallet (accessible by the Forest node)**: This wallet is managed by the Forest node and is included for backward compatibility with Lotus. It is less secure as the node may have direct access to it for network operations. This could potentially expose it to unauthorized access or other network-related vulnerabilities. In the following sections, we will be using the wallet in its local mode. diff --git a/docs/docs/users/guides/monitoring/metrics.md b/docs/docs/users/guides/monitoring/metrics.md index 7f430db560e0..2e6b70a8d8ba 100644 --- a/docs/docs/users/guides/monitoring/metrics.md +++ b/docs/docs/users/guides/monitoring/metrics.md @@ -2,7 +2,7 @@ title: Metrics --- -Prometheus metrics are exposed on localhost's port `6116` by default, under `/metrics`. They are enabled by default and can be disabled with the `--no-metrics` flag. The metrics endpoint can be modified with the `--metrics-address` flag. +Prometheus metrics are exposed on `localhost`'s port `6116` by default, under `/metrics`. They are enabled by default and can be disabled with the `--no-metrics` flag. The metrics endpoint can be modified with the `--metrics-address` flag. 
```bash curl localhost:6116/metrics diff --git a/docs/docs/users/guides/running_with_gateway.md b/docs/docs/users/guides/running_with_gateway.md index 8200fe05fd3e..50b2c23181b7 100644 --- a/docs/docs/users/guides/running_with_gateway.md +++ b/docs/docs/users/guides/running_with_gateway.md @@ -28,7 +28,7 @@ For more information on configuring Lotus Gateway, refer to [Lotus Gateway docum There is not much configuration needed to run Lotus Gateway with Forest. -1. You must export the `FULLNODE_API_INFO` environment variable with the Forest node admin JWT token and its multiaddress. You can find the Forest JWT token in its initialization logs and under the chosen file if you have set the `--save-token ` flag. The multiaddress is the same as the one you would use to connect to the Forest node. +1. You must export the `FULLNODE_API_INFO` environment variable with the Forest node admin JWT token and its multi-address. You can find the Forest JWT token in its initialization logs and under the chosen file if you have set the `--save-token ` flag. The multi-address is the same as the one you would use to connect to the Forest node. 2. Choose a port for the Lotus Gateway to listen on. The default is `2346`, but you can choose any port not already in use. If necessary, open the port in your firewall. 3. Ensure the Forest node is running and accessible from the Lotus Gateway. @@ -43,7 +43,7 @@ That's it! Forest should now be available only through the Lotus Gateway. You ca curl http://localhost:5432/rpc/v1 -X POST -H "Content-Type: application/json" --data '{"method":"Filecoin.ChainHead","params":[], "id":1,"jsonrpc":"2.0"}' ``` -## 🌲Forest + 🪷 Lotus Gateway + 🐳 Docker Compose = ❤️ +## 🌲 Forest + 🪷 Lotus Gateway + 🐳 Docker Compose = ❤️ If you want to run Forest and Lotus Gateway in a Docker container, you can use Docker Compose. Ensure you have [docker](https://www.docker.com/) installed. 
This will: diff --git a/docs/docs/users/knowledge_base/docker_tips.md b/docs/docs/users/knowledge_base/docker_tips.md index 1d61ecd94152..38294401b237 100644 --- a/docs/docs/users/knowledge_base/docker_tips.md +++ b/docs/docs/users/knowledge_base/docker_tips.md @@ -3,7 +3,7 @@ title: Docker Tips & Tricks sidebar_position: 3 --- -# Forest in Docker🌲❤️🐋 +# Forest in Docker 🌲❤️🐋 ## Prerequisites @@ -11,15 +11,15 @@ sidebar_position: 3 the following engines: - Docker Engine (Community) on Linux, - Docker for macOS - - Docker on Windows Subsystem for Linux 2(WSL2) + - Docker on Windows Subsystem for Linux 2(`WSL2`) Native images are available for the following platform/architecture(s): - `linux/arm64` - `linux/amd64` -The images will work out-of-the box on both Intel processors and macOS with -M1 / M2. +The images will work out-of-the-box on both Intel processors and macOS with +`M1` / `M2`. ## Tags diff --git a/docs/docs/users/knowledge_base/network_upgrades_state_migrations.md b/docs/docs/users/knowledge_base/network_upgrades_state_migrations.md index 3516f66f9e4d..2db1f0983df6 100644 --- a/docs/docs/users/knowledge_base/network_upgrades_state_migrations.md +++ b/docs/docs/users/knowledge_base/network_upgrades_state_migrations.md @@ -14,7 +14,7 @@ Some preparation is required for a smooth transition from one network version to State migration is part of the network upgrade. Given the size of the Filecoin state, it usually requires the node to go through every Actor, which takes several seconds. If more changes are required, the migration might take significantly more time and resources. The implementation teams announce the expected upgrade duration and requirements beforehand so that the node can be prepared accordingly. :::tip -On average, ~3-4 network upgrades are performed annually. This number varies based on the FIPs proposed and implementer capacities. This means that the node shouldn't need all the resources that state migrations require. 
For example, during the NV22 network upgrade, Forest required 64 GiB memory. The following update needed at most 16 GiB memory. It may make sense to upgrade the node only around specific network upgrades. +On average, ~3-4 network upgrades are performed annually. This number varies based on the FIPs proposed and implementer capacities. This means that the node shouldn't need all the resources that state migrations require. For example, during the `NV22` network upgrade, Forest required 64 GiB memory. The following update needed at most 16 GiB memory. It may make sense to upgrade the node only around specific network upgrades. ::: ## Avoiding migrations / node recovery @@ -24,7 +24,7 @@ Sometimes, it is not feasible to perform a network migration. If a node is hoste Forest snapshots are available in the [forest-archive](https://forest-archive.chainsafe.dev/list/) (they can also be used with Lotus). Latest snapshots for mainnet are offered by ChainSafe [here](https://forest-archive.chainsafe.dev/list/mainnet/latest). The link to the latest produced snapshot is [here](https://forest-archive.chainsafe.dev/latest/mainnet/). To avoid the network migration, stop it before the network upgrade and wait until a snapshot is generated **after** the upgrade. :::info example -You read that in the [Forest NV23 support announcement](https://github.com/ChainSafe/forest/discussions/4488) the mainnet is going to be upgraded to NV23 at the epoch `4154640`, which corresponds to `2024-08-06T12:00:00Z`. You stop your note at least a minute before the upgrade, so before `2024-08-06T11:59:00Z` and wait until the latest snapshot at the [forest-archive](https://forest-archive.chainsafe.dev/latest/mainnet/) is newer than the epoch `4154640`. +You read that in the [Forest `NV23` support announcement](https://github.com/ChainSafe/forest/discussions/4488) the mainnet is going to be upgraded to `NV23` at the epoch `4154640`, which corresponds to `2024-08-06T12:00:00Z`. 
You stop your node at least a minute before the upgrade, so before `2024-08-06T11:59:00Z` and wait until the latest snapshot at the [forest-archive](https://forest-archive.chainsafe.dev/latest/mainnet/) is newer than the epoch `4154640`. You use `curl` to check the latest snapshot. ```bash @@ -43,7 +43,7 @@ You see that the snapshot is past the upgrade epoch by ten epochs. You download aria2c -x5 https://forest-archive.chainsafe.dev/latest/mainnet/ ``` -You start your node with `--import-snapshot ` and enjoy the new, fancy NV23 features. Hooray! +You start your node with `--import-snapshot ` and enjoy the new, fancy `NV23` features. Hooray! Alternatively, if you are fine with purging the current database, you can do it and use Forest's `--auto-download-snapshot` feature after confirming that the latest snapshot is past the upgrade epoch. diff --git a/docs/docs/users/openrpc.json b/docs/docs/users/openrpc.json index 078328d8d62a..720154bd3f69 100644 --- a/docs/docs/users/openrpc.json +++ b/docs/docs/users/openrpc.json @@ -8385,7 +8385,7 @@ "required": ["key", "beacon", "epoch", "timestamp"] }, "F3TipSetKey": { - "description": "TipSetKey is the canonically ordered concatenation of the block CIDs in a tipset.", + "description": "TipSetKey is the canonically ordered concatenation of the block `CIDs` in a tipset.", "type": "string" }, "FilterID": { diff --git a/docs/docs/users/reference/cli.md b/docs/docs/users/reference/cli.md index 6ad56b4e6b1c..7e56e7e23911 100644 --- a/docs/docs/users/reference/cli.md +++ b/docs/docs/users/reference/cli.md @@ -340,7 +340,7 @@ SUBCOMMANDS: info Print node info shutdown Shutdown Forest healthcheck Print healthcheck info - f3 Manages Filecoin Fast Finality (F3) interactions + f3 Manages Filecoin Fast Finality (`F3`) interactions wait-api Wait for lotus API to come online help Print this message or the help of the given subcommand(s) @@ -416,7 +416,7 @@ Manually set the head to the given tipset.
This invalidates blocks between the d Usage: forest-cli chain set-head [OPTIONS] ... Arguments: - ... Construct the new head tipset from these CIDs + ... Construct the new head tipset from these `CIDs` Options: --epoch Use the tipset from this epoch as the new head. Negative numbers specify decrements from the current head @@ -832,16 +832,16 @@ Options: ### `forest-cli f3` ``` -Manages Filecoin Fast Finality (F3) interactions +Manages Filecoin Fast Finality (`F3`) interactions Usage: forest-cli f3 Commands: - manifest Gets the current manifest used by F3 - status Checks the F3 status - certs Manages interactions with F3 finality certificates [aliases: c] - powertable Gets F3 power table at a specific instance ID or latest instance if none is specified [aliases: pt] - ready Checks if F3 is in sync + manifest Gets the current manifest used by `F3` + status Checks the `F3` status + certs Manages interactions with `F3` finality certificates [aliases: c] + powertable Gets `F3` power table at a specific instance ID or latest instance if none is specified [aliases: pt] + ready Checks if `F3` is in sync help Print this message or the help of the given subcommand(s) Options: @@ -851,7 +851,7 @@ Options: ### `forest-cli f3 manifest` ``` -Gets the current manifest used by F3 +Gets the current manifest used by `F3` Usage: forest-cli f3 manifest [OPTIONS] @@ -872,7 +872,7 @@ Options: ### `forest-cli f3 status` ``` -Checks the F3 status +Checks the `F3` status Usage: forest-cli f3 status @@ -883,13 +883,13 @@ Options: ### `forest-cli f3 certs` ``` -Manages interactions with F3 finality certificates +Manages interactions with `F3` finality certificates Usage: forest-cli f3 certs Commands: - get Gets an F3 finality certificate to a given instance ID, or the latest certificate if no instance is specified - list Lists a range of F3 finality certificates + get Gets an `F3` finality certificate to a given instance ID, or the latest certificate if no instance is specified + list Lists 
a range of `F3` finality certificates help Print this message or the help of the given subcommand(s) Options: @@ -899,7 +899,7 @@ Options: ### `forest-cli f3 certs get` ``` -Gets an F3 finality certificate to a given instance ID, or the latest certificate if no instance is specified +Gets an `F3` finality certificate to a given instance ID, or the latest certificate if no instance is specified Usage: forest-cli f3 certs get [OPTIONS] [INSTANCE] @@ -924,7 +924,7 @@ Options: ### `forest-cli f3 certs list` ``` -Lists a range of F3 finality certificates +Lists a range of `F3` finality certificates Usage: forest-cli f3 certs list [OPTIONS] [RANGE] @@ -957,12 +957,12 @@ Options: ### `forest-cli f3 powertable` ``` -Gets F3 power table at a specific instance ID or latest instance if none is specified +Gets `F3` power table at a specific instance ID or latest instance if none is specified Usage: forest-cli f3 powertable Commands: - get Gets F3 power table at a specific instance ID or latest instance if none is specified [aliases: g] + get Gets `F3` power table at a specific instance ID or latest instance if none is specified [aliases: g] get-proportion Gets the total proportion of power for a list of actors at a given instance [aliases: gp] help Print this message or the help of the given subcommand(s) @@ -973,7 +973,7 @@ Options: ### `forest-cli f3 powertable get` ``` -Gets F3 power table at a specific instance ID or latest instance if none is specified +Gets `F3` power table at a specific instance ID or latest instance if none is specified Usage: forest-cli f3 powertable get [OPTIONS] [INSTANCE] @@ -1004,17 +1004,17 @@ Options: ### `forest-cli f3 ready` ``` -Checks if F3 is in sync +Checks if `F3` is in sync Usage: forest-cli f3 ready [OPTIONS] Options: --wait - Wait until F3 is in sync + Wait until `F3` is in sync --threshold - The threshold of the epoch gap between chain head and F3 head within which F3 is considered in sync [default: 20] + The threshold of the epoch 
gap between chain head and `F3` head within which `F3` is considered in sync [default: 20] --no-progress-timeout - Exit after F3 making no progress for this duration [default: 10m] + Exit after `F3` making no progress for this duration [default: 10m] -h, --help Print help ``` @@ -2020,7 +2020,7 @@ Miscellaneous, semver-exempt commands for developer use Usage: forest-tool shed Commands: - summarize-tipsets Enumerate the tipset CIDs for a span of epochs starting at `height` and working backwards + summarize-tipsets Enumerate the tipset `CIDs` for a span of epochs starting at `height` and working backwards peer-id-from-key-pair Generate a `PeerId` from the given key-pair file private-key-from-key-pair Generate a base64-encoded private key from the given key-pair file. This effectively transforms Forest's key-pair file into a Lotus-compatible private key key-pair-from-private-key Generate a key-pair file from the given base64-encoded private key. This effectively transforms Lotus's private key into a Forest-compatible key-pair file. If `output` is not provided, the key-pair is printed to stdout as a base64-encoded string @@ -2035,7 +2035,7 @@ Options: ### `forest-tool shed summarize-tipsets` ``` -Enumerate the tipset CIDs for a span of epochs starting at `height` and working backwards. +Enumerate the tipset `CIDs` for a span of epochs starting at `height` and working backwards. Useful for getting blocks to live test an RPC endpoint. diff --git a/docs/docs/users/reference/env_variables.md b/docs/docs/users/reference/env_variables.md index 3475862d30f8..8c386803021f 100644 --- a/docs/docs/users/reference/env_variables.md +++ b/docs/docs/users/reference/env_variables.md @@ -32,16 +32,16 @@ process. | `RUST_LOG` | string | empty | `debug,forest_libp2p::service=info` | Allows for log level customization. | | `FOREST_IGNORE_DRAND` | 1 or true | empty | 1 | Ignore Drand validation. 
| | `FOREST_LIBP2P_METRICS_ENABLED` | 1 or true | empty | 1 | Include `libp2p` metrics in Forest's Prometheus output. | -| `FOREST_F3_SIDECAR_RPC_ENDPOINT` | string | 127.0.0.1:23456 | `127.0.0.1:23456` | An RPC endpoint of F3 sidecar. | -| `FOREST_F3_SIDECAR_FFI_ENABLED` | 1 or true | hard-coded per chain | 1 | Whether or not to start the F3 sidecar via FFI | -| `FOREST_F3_CONSENSUS_ENABLED` | 1 or true | hard-coded per chain | 1 | Whether or not to apply the F3 consensus to the node | -| `FOREST_F3_FINALITY` | integer | inherited from chain configuration | 900 | Set the chain finality epochs in F3 manifest | -| `FOREST_F3_PERMANENT_PARTICIPATING_MINER_ADDRESSES` | comma delimited strings | empty | `t0100,t0101` | Set the miner addresses that participate in F3 permanently | -| `FOREST_F3_INITIAL_POWER_TABLE` | string | empty | `bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i` | Set the F3 initial power table CID | -| `FOREST_F3_ROOT` | string | [FOREST_DATA_ROOT]/f3 | `/var/tmp/f3` | Set the data directory for F3 | -| `FOREST_F3_BOOTSTRAP_EPOCH` | integer | -1 | 100 | Set the bootstrap epoch for F3 | -| `FOREST_DRAND_MAINNET_CONFIG` | string | empty | refer to Drand config format section | Override `DRAND_MAINNET` config | -| `FOREST_DRAND_QUICKNET_CONFIG` | string | empty | refer to Drand config format section | Override `DRAND_QUICKNET` config | +| `FOREST_F3_SIDECAR_RPC_ENDPOINT` | string | 127.0.0.1:23456 | `127.0.0.1:23456` | An RPC endpoint of `F3` sidecar. 
| +| `FOREST_F3_SIDECAR_FFI_ENABLED` | 1 or true | hard-coded per chain | 1 | Whether or not to start the `F3` sidecar via FFI | +| `FOREST_F3_CONSENSUS_ENABLED` | 1 or true | hard-coded per chain | 1 | Whether or not to apply the `F3` consensus to the node | +| `FOREST_F3_FINALITY` | integer | inherited from chain configuration | 900 | Set the chain finality epochs in `F3` manifest | +| `FOREST_F3_PERMANENT_PARTICIPATING_MINER_ADDRESSES` | comma delimited strings | empty | `t0100,t0101` | Set the miner addresses that participate in `F3` permanently | +| `FOREST_F3_INITIAL_POWER_TABLE` | string | empty | `bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i` | Set the `F3` initial power table CID | +| `FOREST_F3_ROOT` | string | [FOREST_DATA_ROOT]/f3 | `/var/tmp/f3` | Set the data directory for `F3` | +| `FOREST_F3_BOOTSTRAP_EPOCH` | integer | -1 | 100 | Set the bootstrap epoch for `F3` | +| `FOREST_DRAND_MAINNET_CONFIG` | string | empty | refer to Drand `config` format section | Override `DRAND_MAINNET` config | +| `FOREST_DRAND_QUICKNET_CONFIG` | string | empty | refer to Drand `config` format section | Override `DRAND_QUICKNET` config | | `FOREST_TRACE_FILTER_MAX_RESULT` | positive integer | 500 | 1000 | Sets the maximum results returned per request by `trace_filter` | | `FOREST_CHAIN_INDEXER_ENABLED` | 1 or true | false | 1 | Whether or not to index the chain to support the Ethereum RPC API | | `FOREST_MESSAGES_IN_TIPSET_CACHE_SIZE` | positive integer | 100 | 42 | The size of an internal cache of tipsets to messages | @@ -54,10 +54,10 @@ process. ### `FOREST_F3_SIDECAR_FFI_BUILD_OPT_OUT` -This is an environment variable that allows users to opt out of building the f3-sidecar. It's only useful when building +This is an environment variable that allows users to opt out of building the `f3`-sidecar. It's only useful when building the binary. 
-By default, the Go f3-sidecar is built and linked into Forest binary unless environment +By default, the Go `f3`-sidecar is built and linked into Forest binary unless environment variable `FOREST_F3_SIDECAR_FFI_BUILD_OPT_OUT=1` is set. ### `FOREST_DB_DEV_MODE` @@ -86,7 +86,7 @@ Intended for controlled cross-system token sharing where expiration validation m > Disabling expiration checks for all JWTs will also allow expired tokens. > This significantly weakens security and should only be used in tightly controlled environments. Not recommended for general use. -### Drand config format +### Drand `config` format ```json { diff --git a/docs/docs/users/reference/json_rpc.mdx b/docs/docs/users/reference/json_rpc.mdx index c936364900a5..7f6fe1672b76 100644 --- a/docs/docs/users/reference/json_rpc.mdx +++ b/docs/docs/users/reference/json_rpc.mdx @@ -1,6 +1,6 @@ --- title: JSON-RPC Schema -sidebar_position: 4 +sidebar_position: 4 --- export const style = { diff --git a/docs/docs/users/reference/json_rpc_overview.md b/docs/docs/users/reference/json_rpc_overview.md index 0f0697b04664..744fb69ddf2f 100644 --- a/docs/docs/users/reference/json_rpc_overview.md +++ b/docs/docs/users/reference/json_rpc_overview.md @@ -14,7 +14,7 @@ This API is still a WIP, with more methods being added continuously. :::note Need a specific method? Let us know on -[Github](https://github.com/ChainSafe/forest/issues) or Filecoin Slack +[GitHub](https://github.com/ChainSafe/forest/issues) or Filecoin Slack (`#fil-forest-help`) 🙏 ::: @@ -24,12 +24,12 @@ The RPC interface is the primary mechanism for interacting with Forest.
Until June 2025, the Lotus -[V0](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-methods-v0-deprecated.md) +[`V0`](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-methods-v0-deprecated.md) and -[V1](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-methods-v1-stable.md) +[`V1`](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-methods-v1-stable.md) APIs served as the reference for Forest's implementation. -Since June 2025, the [Common Node API](https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0104.md) has been the standard for RPC methods across Filecoin clients, replacing the Lotus-specific API as the primary reference for V1 methods. +Since June 2025, the [Common Node API](https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0104.md) has been the standard for RPC methods across Filecoin clients, replacing the Lotus-specific API as the primary reference for `V1` methods. :::info diff --git a/docs/package.json b/docs/package.json index 482719669307..deab99f97b97 100644 --- a/docs/package.json +++ b/docs/package.json @@ -16,7 +16,6 @@ "write-translations": "docusaurus write-translations", "write-heading-ids": "docusaurus write-heading-ids", "typecheck": "tsc", - "spellcheck": "spellchecker -f \"./docs/**/*.md\" -l en-US -q -d ./dictionary.txt", "format-check": "prettier --check .", "format": "prettier --write ." 
}, @@ -35,7 +34,6 @@ "devDependencies": { "@docusaurus/module-type-aliases": "^3.9.2", "@docusaurus/tsconfig": "^3.9.2", - "spellchecker-cli": "^7.0.0", "typescript": "^5.9.3" }, "browserslist": { diff --git a/documentation/src/developer_documentation/chain_index_spike.md b/documentation/src/developer_documentation/chain_index_spike.md index b6fafa478782..30f0f165c523 100644 --- a/documentation/src/developer_documentation/chain_index_spike.md +++ b/documentation/src/developer_documentation/chain_index_spike.md @@ -84,7 +84,7 @@ on how much historical state is indexed. **Question:** What are our options for Rust in this context? 1. **Rusqlite**? -2. **Sqlx** crate? In case of abtraction of the backend DB: +2. **SQLx** crate? In case of abstraction of the backend DB: - **PostgreSQL** - **SQLite** - Other options diff --git a/documentation/src/developer_documentation/rpc_api_compatibility.md b/documentation/src/developer_documentation/rpc_api_compatibility.md index 87f86e73f7e7..1fde8bc9ba1d 100644 --- a/documentation/src/developer_documentation/rpc_api_compatibility.md +++ b/documentation/src/developer_documentation/rpc_api_compatibility.md @@ -55,7 +55,7 @@ be implemented in the foreseeable future. The `lotus-gateway` executable is a reverse-proxy that sanitizes RPC calls before they're forwarded to a Filecoin node. The `forest-tool api compare` command will fail if run against a gateway rather than directly against a node. -This means API compatiblity testing has to be done with a local node rather than +This means API compatibility testing has to be done with a local node rather than `api.node.glif.io`. ## Use `mitmproxy` diff --git a/f3-sidecar/README.md b/f3-sidecar/README.md index 19739287ff16..0ff575403fa2 100644 --- a/f3-sidecar/README.md +++ b/f3-sidecar/README.md @@ -3,7 +3,7 @@ In addition to the Rust toolchain, Go toolchain is required to build the `f3-sidecar`. The Go version is specified in `go.mod`. 
-Follow https://go.dev/doc/install or use one of the version managers of Go. +Follow this [guide](https://go.dev/doc/install) to install or use one of the version managers of Go. (e.g. https://github.com/voidint/g?tab=readme-ov-file#installation) ### EC tests @@ -17,15 +17,15 @@ Follow https://go.dev/doc/install or use one of the version managers of Go. `--save-token jwt_path`) - import a shared miner key for testing `forest-wallet --remote-wallet import` (the shared miner worker key can be found in `scripts/tests/api_compare/.env`) -- run f3 sidecar `go run . -jwt $(cat jwt_path)` +- run `f3` sidecar `go run . -jwt $(cat jwt_path)` - (optional) to inspect RPC calls, run `mitmproxy --mode reverse:http://localhost:2345 --listen-port 8080` then `go run . -rpc http://127.0.0.1:8080/rpc/v1` -### How F3 sidecar interacts with Forest +### How `F3` sidecar interacts with Forest -An F3 sidecar node is a standalone node that is a part of a p2p network and -participates in the f3 protocol. +An `F3` sidecar node is a standalone node that is a part of a p2p network and +participates in the `f3` protocol. Besides what have been handled internally(e.g. p2p communications) in the `go-f3` lib @@ -40,7 +40,7 @@ Besides what have been handled internally(e.g. 
p2p communications) in the participating miners - it requires a backend that provides the actor IDs of the participating miners - it requires a p2p node as bootstrapper to discover more peers via Kademlia -- additionally, to power the `Filecoin.F3*` RPC methods in forest, a sidecar +- additionally, to power the `Filecoin.F3*` RPC methods in forest, an `f3` sidecar node runs an RPC server that implements the same RPC methods to which the associated forest node can delegate the RPC requests @@ -57,10 +57,10 @@ flowchart TD A --> |dynamic manifest backend| D[manifest p2p server] ``` -### To build and run F3 sidecar within Forest via FFI +### To build and run `F3` sidecar within Forest via FFI -By default, the Go F3-sidecar is built and linked into Forest binary unless +By default, the Go `F3`-sidecar is built and linked into Forest binary unless environment variable `FOREST_F3_SIDECAR_FFI_BUILD_OPT_OUT=1` is set. -F3 sidecar is not started by default, set `FOREST_F3_SIDECAR_FFI_ENABLED=1` to +`F3` sidecar is not started by default, set `FOREST_F3_SIDECAR_FFI_ENABLED=1` to opt in. diff --git a/scripts/devnet-curio/README.md b/scripts/devnet-curio/README.md index 963afc19a111..d14e2623b2e3 100644 --- a/scripts/devnet-curio/README.md +++ b/scripts/devnet-curio/README.md @@ -29,7 +29,7 @@ docker compose up --build This will build the local Forest (using the Dockerfile in the project's root) image, tagged Lotus and setup the devnet. Initial setup may be slow due to -fetching params and setting everyting up. Consecutive starts will be quick. +fetching params and setting everything up. Consecutive starts will be quick. Stop the devnet with: diff --git a/scripts/devnet/README.md b/scripts/devnet/README.md index 0dc7418a4409..e6985a049500 100644 --- a/scripts/devnet/README.md +++ b/scripts/devnet/README.md @@ -24,7 +24,7 @@ docker compose up --build This will build the local Forest (using the Dockerfile in the project's root) image, tagged Lotus and setup the devnet. 
Initial setup may be slow due to -fetching params and setting everyting up. Consecutive starts will be quick. +fetching params and setting everything up. Consecutive starts will be quick. Stop the devnet with: diff --git a/scripts/tests/bootstrapper/README.md b/scripts/tests/bootstrapper/README.md index afd5c500f1d8..3fdd8e731d4b 100644 --- a/scripts/tests/bootstrapper/README.md +++ b/scripts/tests/bootstrapper/README.md @@ -26,7 +26,7 @@ flowchart TD ./test_bootstrapper.sh lotus ``` -## Teardown +## `Teardown` ```bash docker compose -f ./docker-compose-forest.yml down -v --rmi all diff --git a/src/blocks/block.rs b/src/blocks/block.rs index 5f5de480bc14..c0ca52889f9d 100644 --- a/src/blocks/block.rs +++ b/src/blocks/block.rs @@ -8,11 +8,11 @@ use fvm_ipld_encoding::tuple::*; use super::CachingBlockHeader; -/// Limit of BLS and SECP messages combined in a block. +/// Limit of BLS and `SECP` messages combined in a block. pub const BLOCK_MESSAGE_LIMIT: usize = 10000; /// A complete Filecoin block. This contains the block header as well as all BLS -/// and SECP messages. +/// and `SECP` messages. #[derive(Clone, Debug, PartialEq, Eq)] pub struct Block { pub header: CachingBlockHeader, @@ -42,7 +42,7 @@ impl Block { } } -/// Tracks the Merkle roots of both SECP and BLS messages separately. +/// Tracks the Merkle roots of both `SECP` and BLS messages separately. #[derive(Serialize_tuple, Deserialize_tuple)] pub struct TxMeta { pub bls_message_root: Cid, diff --git a/src/blocks/chain4u.rs b/src/blocks/chain4u.rs index 5e7c79b24978..0374a7973b72 100644 --- a/src/blocks/chain4u.rs +++ b/src/blocks/chain4u.rs @@ -478,7 +478,7 @@ impl Override { /// [`Chain4U`] will change [`RawBlockHeader`] fields to create a valid graph of tipsets. /// -/// This struct describes which fields are _allowed_ to change. +/// This `struct` describes which fields are _allowed_ to change. #[derive(Default, Debug, Clone)] pub struct HeaderBuilder { pub miner_address: Override
, diff --git a/src/blocks/tipset.rs b/src/blocks/tipset.rs index 490d5de22caa..5cc2d54a6892 100644 --- a/src/blocks/tipset.rs +++ b/src/blocks/tipset.rs @@ -27,8 +27,8 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use tracing::info; -/// A set of `CIDs` forming a unique key for a Tipset. -/// Equal keys will have equivalent iteration order, but note that the `CIDs` +/// A set of `CIDs` forming a unique key for a Tipset. +/// Equal keys will have equivalent iteration order, but note that the `CIDs` /// are *not* maintained in the same order as the canonical iteration order of /// blocks in a tipset (which is by ticket) #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord, GetSize)] @@ -337,7 +337,7 @@ impl Tipset { self.key .get_or_init(|| TipsetKey::from(self.headers.iter_ne().map(|h| *h.cid()).collect_vec())) } - /// Returns a non-empty collection of `CIDs` for the current tipset + /// Returns a non-empty collection of `CIDs` for the current tipset pub fn cids(&self) -> NonEmpty { self.key().to_cids() } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index cbbf4ef718af..3eba9108f078 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -49,7 +49,7 @@ pub async fn export_from_head( Ok((head_ts, digest)) } -/// Exports a Filecoin snapshot in v1 format +/// Exports a Filecoin snapshot in `v1` format /// See pub async fn export( db: &Arc, @@ -62,7 +62,7 @@ pub async fn export( export_to_forest_car::(roots, None, db, tipset, lookup_depth, writer, options).await } -/// Exports a Filecoin snapshot in v2 format +/// Exports a Filecoin snapshot in `v2` format /// See pub async fn export_v2( db: &Arc, diff --git a/src/chain/snapshot_format.rs b/src/chain/snapshot_format.rs index b59c6aa54eff..b65669e755d8 100644 --- a/src/chain/snapshot_format.rs +++ b/src/chain/snapshot_format.rs @@ -50,7 +50,7 @@ pub struct FilecoinSnapshotMetadata { pub version: FilecoinSnapshotVersion, /// Chain head tipset key pub head_tipset_key: 
NonEmpty, - /// F3 snapshot `CID` + /// `F3` snapshot `CID` pub f3_data: Option, } diff --git a/src/chain/store/chain_store.rs b/src/chain/store/chain_store.rs index 9433737b2b90..11b6d7978b34 100644 --- a/src/chain/store/chain_store.rs +++ b/src/chain/store/chain_store.rs @@ -52,7 +52,7 @@ pub enum HeadChange { } /// Stores chain data such as heaviest tipset and cached tipset info at each -/// epoch. This structure is thread-safe, and all caches are wrapped in a mutex +/// epoch. This structure is thread-safe, and all caches are wrapped in a `mutex` /// to allow a consistent `ChainStore` to be shared across tasks. pub struct ChainStore { /// Publisher for head change events @@ -464,7 +464,7 @@ fn filter_lowest_index(values: Vec<(EthHash, Cid, u64, usize)>) -> Vec<(EthHash, .collect() } -/// Returns a Tuple of BLS messages of type `UnsignedMessage` and SECP messages +/// Returns a Tuple of BLS messages of type `UnsignedMessage` and `SECP` messages /// of type `SignedMessage` pub fn block_messages( db: &DB, @@ -496,7 +496,7 @@ where Ok((bls_msgs, secp_msgs)) } -/// Returns a tuple of CIDs for both unsigned and signed messages +/// Returns a tuple of `CIDs` for both unsigned and signed messages pub fn read_msg_cids( db: &DB, block_header: &CachingBlockHeader, @@ -533,7 +533,7 @@ where Ok(()) } -/// Returns a vector of CIDs from provided root CID +/// Returns a vector of `CIDs` from provided root CID fn read_amt_cids(db: &DB, root: &Cid) -> Result, Error> where DB: Blockstore, diff --git a/src/chain_sync/consensus.rs b/src/chain_sync/consensus.rs index d374b4c93595..090f948b5d83 100644 --- a/src/chain_sync/consensus.rs +++ b/src/chain_sync/consensus.rs @@ -4,7 +4,7 @@ use nunny::Vec as NonEmpty; use tokio::task::JoinSet; -/// Helper function to collect errors from async validations. +/// Helper function to collect errors from `async` validations. 
pub async fn collect_errs( mut handles: JoinSet>, ) -> Result<(), NonEmpty> { diff --git a/src/chain_sync/validation.rs b/src/chain_sync/validation.rs index ecafc30f369a..9320e44a48cf 100644 --- a/src/chain_sync/validation.rs +++ b/src/chain_sync/validation.rs @@ -120,7 +120,7 @@ impl TipsetValidator<'_> { bls_msgs: &[Message], secp_msgs: &[SignedMessage], ) -> Result { - // Generate message CIDs + // Generate message `CIDs` let bls_cids = bls_msgs .iter() .map(Cid::from_cbor_blake2b256) diff --git a/src/cid_collections/hash_map.rs b/src/cid_collections/hash_map.rs index 8de5b3f6a778..9afb089b8691 100644 --- a/src/cid_collections/hash_map.rs +++ b/src/cid_collections/hash_map.rs @@ -144,7 +144,7 @@ pub enum Entry<'a, V: 'a> { } /// A view into an occupied entry in a `HashMap`. -/// It is part of the [`Entry`] enum. +/// It is part of the [`Entry`] `enum`. /// /// See also [`std::collections::hash_map::OccupiedEntry`]. #[allow(dead_code)] @@ -175,7 +175,7 @@ enum OccupiedEntryInner<'a, V> { } /// A view into a vacant entry in a `HashMap`. -/// It is part of the [`Entry`] enum. +/// It is part of the [`Entry`] `enum`. /// /// See also [`std::collections::hash_map::VacantEntry`]. #[allow(dead_code)] diff --git a/src/cid_collections/mod.rs b/src/cid_collections/mod.rs index bd8e40f99a97..8ba93bb73d8f 100644 --- a/src/cid_collections/mod.rs +++ b/src/cid_collections/mod.rs @@ -10,17 +10,17 @@ pub use small_cid_vec::SmallCidNonEmptyVec; /// The core primitive for saving space in this module. /// -/// CIDs contain a significant amount of static data (such as version, codec, hash identifier, hash +/// `CIDs` contain a significant amount of static data (such as version, codec, hash identifier, hash /// length). /// -/// Nearly all Filecoin CIDs are `V1`,`DagCbor` encoded, and hashed with `Blake2b256` (which has a hash +/// Nearly all Filecoin `CIDs` are `V1`,`DagCbor` encoded, and hashed with `Blake2b256` (which has a hash /// length of 256 bits (32 bytes)). 
/// Naively representing such a CID requires 96 bytes but the non-static portion is only /// 32 bytes, represented as [`CidV1DagCborBlake2b256`]. /// /// In collections, choose to store only 32 bytes where possible. /// -/// Note that construction of CIDs should always go through this type, to ensure +/// Note that construction of `CIDs` should always go through this type, to ensure /// - canonicalization /// - the contract of [`Uncompactable`] /// diff --git a/src/cid_collections/small_cid_vec.rs b/src/cid_collections/small_cid_vec.rs index a98ebddedae0..62b3591463f6 100644 --- a/src/cid_collections/small_cid_vec.rs +++ b/src/cid_collections/small_cid_vec.rs @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize}; #[cfg(doc)] use crate::blocks::TipsetKey; -/// There are typically MANY small, immutable collections of CIDs in, e.g [`TipsetKey`]s. +/// There are typically MANY small, immutable collections of `CIDs` in, e.g [`TipsetKey`]s. /// /// Save space on those by: /// - Using [`SmallCid`]s @@ -130,7 +130,7 @@ impl<'de> Deserialize<'de> for SmallCid { ///////////////////// #[cfg(test)] -// Note this goes through MaybeCompactedCid, artificially bumping the probability of compact CIDs +// Note this goes through MaybeCompactedCid, artificially bumping the probability of compact `CIDs` impl quickcheck::Arbitrary for SmallCid { fn arbitrary(g: &mut quickcheck::Gen) -> Self { Self::from(Cid::from(MaybeCompactedCid::arbitrary(g))) diff --git a/src/cli/humantoken.rs b/src/cli/humantoken.rs index cdbe27dbe619..919eddab04b7 100644 --- a/src/cli/humantoken.rs +++ b/src/cli/humantoken.rs @@ -135,7 +135,7 @@ mod parse { /// ``` /// /// # Known bugs - /// - `1efil` will not parse as an exa (`10^18`), because we'll try and + /// - `1efil` will not parse as an `exa` (`10^18`), because we'll try and /// parse it as a exponent in the float. Instead use `1 efil`. 
pub fn parse(input: &str) -> anyhow::Result { let (mut big_decimal, scale) = parse_big_decimal_and_scale(input)?; diff --git a/src/cli/subcommands/chain_cmd.rs b/src/cli/subcommands/chain_cmd.rs index c15bab97725c..94a0ac817aa5 100644 --- a/src/cli/subcommands/chain_cmd.rs +++ b/src/cli/subcommands/chain_cmd.rs @@ -63,7 +63,7 @@ pub enum ChainCommands { /// Manually set the head to the given tipset. This invalidates blocks /// between the desired head and the new head SetHead { - /// Construct the new head tipset from these CIDs + /// Construct the new head tipset from these `CIDs` #[arg(num_args = 1.., required = true)] cids: Vec, /// Use the tipset from this epoch as the new head. diff --git a/src/cli/subcommands/f3_cmd.rs b/src/cli/subcommands/f3_cmd.rs index a788202adc95..5f80089c7421 100644 --- a/src/cli/subcommands/f3_cmd.rs +++ b/src/cli/subcommands/f3_cmd.rs @@ -78,32 +78,32 @@ pub enum F3OutputFormat { Json, } -/// Manages Filecoin Fast Finality (F3) interactions +/// Manages Filecoin Fast Finality (`F3`) interactions #[derive(Debug, Subcommand)] pub enum F3Commands { - /// Gets the current manifest used by F3 + /// Gets the current manifest used by `F3` Manifest { /// The output format. #[arg(long, value_enum, default_value_t = F3OutputFormat::Text)] output: F3OutputFormat, }, - /// Checks the F3 status. + /// Checks the `F3` status. Status, - /// Manages interactions with F3 finality certificates. + /// Manages interactions with `F3` finality certificates. #[command(subcommand, visible_alias = "c")] Certs(F3CertsCommands), - /// Gets F3 power table at a specific instance ID or latest instance if none is specified. + /// Gets `F3` power table at a specific instance ID or latest instance if none is specified. #[command(subcommand, name = "powertable", visible_alias = "pt")] PowerTable(F3PowerTableCommands), - /// Checks if F3 is in sync. + /// Checks if `F3` is in sync. Ready { - /// Wait until F3 is in sync. + /// Wait until `F3` is in sync. 
#[arg(long)] wait: bool, - /// The threshold of the epoch gap between chain head and F3 head within which F3 is considered in sync. + /// The threshold of the epoch gap between chain head and `F3` head within which `F3` is considered in sync. #[arg(long, default_value_t = 20)] threshold: usize, - /// Exit after F3 making no progress for this duration. + /// Exit after `F3` making no progress for this duration. #[arg(long, default_value = "10m", requires = "wait")] no_progress_timeout: humantime::Duration, }, @@ -146,7 +146,7 @@ impl F3Commands { let is_running = client.call(F3IsRunning::request(())?).await?; if !is_running { - anyhow::bail!("F3 is not running"); + anyhow::bail!("F3 is not running"); } async fn get_heads( @@ -181,7 +181,7 @@ impl F3Commands { >= chain_head.epoch() { let text = format!( - "[+] F3 is in sync. Chain head epoch: {}, F3 head epoch: {}", + "[+] F3 is in sync. Chain head epoch: {}, F3 head epoch: {}", chain_head.epoch(), cert_head.chain_head().epoch ); @@ -190,7 +190,7 @@ impl F3Commands { break; } else { let text = format!( - "[-] F3 is not in sync. Chain head epoch: {}, F3 head epoch: {}", + "[-] F3 is not in sync. Chain head epoch: {}, F3 head epoch: {}", chain_head.epoch(), cert_head.chain_head().epoch ); @@ -229,17 +229,17 @@ impl F3Commands { } } -/// Manages interactions with F3 finality certificates. +/// Manages interactions with `F3` finality certificates. #[derive(Debug, Subcommand)] pub enum F3CertsCommands { - /// Gets an F3 finality certificate to a given instance ID, or the latest certificate if no instance is specified. + /// Gets an `F3` finality certificate to a given instance ID, or the latest certificate if no instance is specified. Get { instance: Option, /// The output format. #[arg(long, value_enum, default_value_t = F3OutputFormat::Text)] output: F3OutputFormat, }, - /// Lists a range of F3 finality certificates. + /// Lists a range of `F3` finality certificates. 
List { /// Inclusive range of `from` and `to` instances in following notation: /// `..`. Either `` or `` may be omitted, but not both. @@ -343,7 +343,7 @@ impl F3CertsCommands { #[derive(Debug, Subcommand)] pub enum F3PowerTableCommands { - /// Gets F3 power table at a specific instance ID or latest instance if none is specified. + /// Gets `F3` power table at a specific instance ID or latest instance if none is specified. #[command(visible_alias = "g")] Get { /// instance ID. (default: latest) diff --git a/src/cli/subcommands/mod.rs b/src/cli/subcommands/mod.rs index bbb39793a7e9..24991471532b 100644 --- a/src/cli/subcommands/mod.rs +++ b/src/cli/subcommands/mod.rs @@ -94,7 +94,7 @@ pub enum Subcommand { #[command(subcommand)] Healthcheck(HealthcheckCommand), - /// Manages Filecoin Fast Finality (F3) interactions + /// Manages Filecoin Fast Finality (`F3`) interactions #[command(subcommand)] F3(F3Commands), diff --git a/src/cli/subcommands/state_cmd.rs b/src/cli/subcommands/state_cmd.rs index 273cffc4e489..a6a4adcde5b8 100644 --- a/src/cli/subcommands/state_cmd.rs +++ b/src/cli/subcommands/state_cmd.rs @@ -43,7 +43,7 @@ pub enum StateCommands { /// Actor address to read the state of actor_address: StrictAddress, }, - /// Returns the built-in actor bundle CIDs for the current network + /// Returns the built-in actor bundle `CIDs` for the current network ActorCids { /// Format output #[arg(long, default_value = "text")] diff --git a/src/cli_shared/cli/client.rs b/src/cli_shared/cli/client.rs index a00f50287396..ea93680d6536 100644 --- a/src/cli_shared/cli/client.rs +++ b/src/cli_shared/cli/client.rs @@ -48,7 +48,7 @@ pub struct Client { pub snapshot_path: Option, pub import_mode: ImportMode, /// Skips loading import CAR file and assumes it's already been loaded. - /// Will use the CIDs in the header of the file to index the chain. + /// Will use the `CIDs` in the header of the file to index the chain. 
pub skip_load: bool, /// When importing CAR files, chunk key-value pairs before committing them /// to the database. diff --git a/src/daemon/bundle.rs b/src/daemon/bundle.rs index d41dd963fd38..6278f3a27ec4 100644 --- a/src/daemon/bundle.rs +++ b/src/daemon/bundle.rs @@ -82,7 +82,7 @@ pub static ACTOR_BUNDLE_CACHE_DIR: LazyLock = LazyLock::new(|| { .join("actor-bundles") }); -/// Loads the missing actor bundle, returns the CIDs of the loaded bundles. +/// Loads the missing actor bundle, returns the `CIDs` of the loaded bundles. pub async fn load_actor_bundles_from_server( db: &impl PersistentStore, network: &NetworkChain, diff --git a/src/db/car/any.rs b/src/db/car/any.rs index 1932eb19404e..e592c7f241a7 100644 --- a/src/db/car/any.rs +++ b/src/db/car/any.rs @@ -6,7 +6,7 @@ //! header and the first key-value block, and picks the appropriate block store //! (either [`super::ForestCar`] or [`super::PlainCar`]). //! -//! CARv2 is not supported yet. +//! `CARv2` is not supported yet. use super::{CacheKey, RandomAccessFileReader, ZstdFrameCache}; use crate::blocks::{Tipset, TipsetKey}; diff --git a/src/db/car/forest.rs b/src/db/car/forest.rs index 17ef3fa0c908..9e37f61ad2a4 100644 --- a/src/db/car/forest.rs +++ b/src/db/car/forest.rs @@ -43,7 +43,7 @@ //! //! `zstd` frame format: //! -//! CARv1 specification: +//! `CARv1` specification: //! use super::{CacheKey, ZstdFrameCache}; @@ -322,7 +322,7 @@ impl Encoder { offset += header_len; - // Write seekable zstd and collect a mapping of CIDs to frame_offset+data_offset. + // Write seekable zstd and collect a mapping of `CIDs` to frame_offset+data_offset. let mut builder = index::Builder::new(); while let Some((cids, zstd_frame)) = stream.try_next().await? 
{ builder.extend(cids.into_iter().map(|cid| (cid, offset as u64))); @@ -563,7 +563,7 @@ mod tests { fn encode_hash_collisions() { use crate::utils::multihash::prelude::*; - // Distinct CIDs may map to the same hash value + // Distinct `CIDs` may map to the same hash value let cid_a = Cid::new_v1(0, MultihashCode::Identity.digest(&[10])); let cid_b = Cid::new_v1(0, MultihashCode::Identity.digest(&[0])); // A and B are _not_ the same... @@ -572,7 +572,7 @@ mod tests { assert_eq!(index::hash::summary(&cid_a), index::hash::summary(&cid_b)); // For testing purposes, we ignore that the data doesn't map to the - // CIDs. + // `CIDs`. let blocks = nonempty![ CarBlock { cid: cid_a, @@ -593,7 +593,7 @@ mod tests { )) .unwrap(); - // Even with colliding hashes, the CIDs can still be queried: + // Even with colliding hashes, the `CIDs` can still be queried: assert_eq!(forest_car.get(&cid_a).unwrap().unwrap(), blocks[0].data); assert_eq!(forest_car.get(&cid_b).unwrap().unwrap(), blocks[1].data); } diff --git a/src/db/car/forest/index/mod.rs b/src/db/car/forest/index/mod.rs index 6f444a361f7c..22e19d2c302a 100644 --- a/src/db/car/forest/index/mod.rs +++ b/src/db/car/forest/index/mod.rs @@ -124,7 +124,7 @@ where /// Returns `Ok([])` if no offsets are found, or [`Err(_)`] if the underlying /// IO fails. /// - /// Does not allocate unless 2 or more CIDs have collided, see [module documentation](mod@self). + /// Does not allocate unless 2 or more `CIDs` have collided, see [module documentation](mod@self). /// /// You MUST check the actual CID at the offset to see if it matches. 
pub fn get(&self, key: Cid) -> io::Result> { diff --git a/src/db/car/mod.rs b/src/db/car/mod.rs index 6b74bab81edd..2401436242b2 100644 --- a/src/db/car/mod.rs +++ b/src/db/car/mod.rs @@ -32,7 +32,7 @@ pub type CacheKey = u64; type FrameOffset = u64; -/// According to FRC-0108, v2 snapshots have exactly one root pointing to metadata +/// According to FRC-0108, `v2` snapshots have exactly one root pointing to metadata pub const V2_SNAPSHOT_ROOT_COUNT: usize = 1; pub static ZSTD_FRAME_CACHE_DEFAULT_MAX_SIZE: LazyLock = LazyLock::new(|| { diff --git a/src/db/car/plain.rs b/src/db/car/plain.rs index bc5bec4225db..2af2618311df 100644 --- a/src/db/car/plain.rs +++ b/src/db/car/plain.rs @@ -22,7 +22,7 @@ //! offset =body length //! ``` //! -//! # CARv1 layout and seeking +//! # `CARv1` layout and seeking //! //! The first varint frame is a _header frame_, where the frame body is a [`CarHeader`] encoded //! using [`ipld_dagcbor`](serde_ipld_dagcbor). @@ -57,7 +57,7 @@ //! - Use safe arithmetic for all operations - a malicious frame shouldn't cause a crash. //! - Theoretically, file-backed blockstores should be clonable (or even [`Sync`]) with very low //! overhead, so that multiple threads could perform operations concurrently. -//! - CARv2 support +//! - `CARv2` support //! - A wrapper that abstracts over car formats for reading. use crate::chain::FilecoinSnapshotMetadata; @@ -92,7 +92,7 @@ use tracing::{debug, trace}; /// It can often be time, memory, or disk prohibitive to read large snapshots into a database like /// [`ParityDb`](crate::db::parity_db::ParityDb). /// -/// This is an implementer of [`Blockstore`] that simply wraps an uncompressed [CARv1 +/// This is an implementer of [`Blockstore`] that simply wraps an uncompressed [`CARv1` /// file](https://ipld.io/specs/transport/car/carv1). 
/// /// On creation, [`PlainCar`] builds an in-memory index of the [`Cid`]s in the file, @@ -374,7 +374,7 @@ fn read_v1_header(mut reader: impl Read) -> io::Result { /// Importantly, we seek `block data length`, rather than read any in. /// This allows us to keep indexing fast. /// -/// [`Ok(None)`] on EOF +/// [`Ok(None)`] on `EOF` #[tracing::instrument(level = "trace", skip_all, ret)] fn read_block_data_location_and_skip( mut reader: impl Read + Seek, @@ -410,7 +410,7 @@ fn read_block_data_location_and_skip( } /// Reads `body length`, leaving the reader at the start of a varint frame, -/// or returns [`Ok(None)`] if we've reached EOF +/// or returns [`Ok(None)`] if we've reached `EOF` /// ```text /// start ►│ /// ├───────────┬─────────────┐ diff --git a/src/db/mod.rs b/src/db/mod.rs index 9712dfab063b..4265d246c59a 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -117,7 +117,7 @@ pub trait EthMappingsStore { #[allow(dead_code)] fn exists(&self, key: &EthHash) -> anyhow::Result; - /// Returns all message CIDs with their timestamp. + /// Returns all message `CIDs` with their timestamp. fn get_message_cids(&self) -> anyhow::Result>; /// Deletes `keys` if keys exist in store. diff --git a/src/documentation.rs b/src/documentation.rs index f8d6c4386e21..780a66faf34f 100644 --- a/src/documentation.rs +++ b/src/documentation.rs @@ -97,7 +97,7 @@ /// /// The [`ChainMuxer`](crate::chain_sync::ChainMuxer) receives two kinds of [messages](crate::libp2p::PubsubMessage) /// from peers: -/// - [`GossipBlock`](crate::blocks::GossipBlock)s are descriptions of a single block, with the `BlockHeader` and `Message` CIDs. +/// - [`GossipBlock`](crate::blocks::GossipBlock)s are descriptions of a single block, with the `BlockHeader` and `Message` `CIDs`. /// - [`SignedMessage`](crate::message::SignedMessage)s /// /// It assembles these messages into a chain to genesis. 
diff --git a/src/eth/eip_1559_transaction.rs b/src/eth/eip_1559_transaction.rs index 5aa6d2c948f9..f1b0c2cd6220 100644 --- a/src/eth/eip_1559_transaction.rs +++ b/src/eth/eip_1559_transaction.rs @@ -133,7 +133,7 @@ impl EthEip1559TxArgs { Ok(stream.out().to_vec()) } - /// Constructs a signed message using EIP-1559 transaction args + /// Constructs a signed message using EIP-1559 transaction `args` pub fn get_signed_message( &self, from: Address, @@ -144,7 +144,7 @@ impl EthEip1559TxArgs { Ok(SignedMessage { message, signature }) } - /// Constructs an unsigned message using EIP-1559 transaction args + /// Constructs an unsigned message using EIP-1559 transaction `args` pub fn get_unsigned_message( &self, from: Address, diff --git a/src/eth/eip_155_transaction.rs b/src/eth/eip_155_transaction.rs index 349267789742..7e38d0152fa4 100644 --- a/src/eth/eip_155_transaction.rs +++ b/src/eth/eip_155_transaction.rs @@ -219,7 +219,7 @@ impl EthLegacyEip155TxArgs { Ok(stream.out().to_vec()) } - /// Constructs a signed message using legacy EIP-155 transaction args + /// Constructs a signed message using legacy EIP-155 transaction `args` pub fn get_signed_message( &self, from: Address, @@ -230,7 +230,7 @@ impl EthLegacyEip155TxArgs { Ok(SignedMessage { message, signature }) } - /// Constructs an unsigned message using legacy EIP-155 transaction args + /// Constructs an unsigned message using legacy EIP-155 transaction `args` pub fn get_unsigned_message( &self, from: Address, @@ -238,7 +238,7 @@ impl EthLegacyEip155TxArgs { ) -> anyhow::Result { ensure!( validate_eip155_chain_id(eth_chain_id, &self.v).is_ok(), - "Failed to validate EIP155 chain Id" + "Failed to validate `EIP155` chain Id" ); let method_info = get_filecoin_method_info(&self.to, &self.input)?; Ok(Message { @@ -269,7 +269,7 @@ impl EthLegacyEip155TxArgsBuilder { } } -/// Validates the EIP155 chain ID by deriving it from the given `v` value +/// Validates the `EIP155` chain ID by deriving it from the given `v` value 
pub fn validate_eip155_chain_id(eth_chain_id: EthChainId, v: &BigInt) -> anyhow::Result<()> { let derived_chain_id = derive_eip_155_chain_id(v)?; ensure!( @@ -283,7 +283,7 @@ pub fn validate_eip155_chain_id(eth_chain_id: EthChainId, v: &BigInt) -> anyhow: Ok(()) } -/// Derives the EIP155 chain ID from the `V` value +/// Derives the `EIP155` chain ID from the `V` value pub fn derive_eip_155_chain_id(v: &BigInt) -> anyhow::Result { if v.bits() <= 64 { let v = v.to_u64().context("Failed to convert v to u64")?; diff --git a/src/eth/homestead_transaction.rs b/src/eth/homestead_transaction.rs index af41b81e3179..38580d043e4b 100644 --- a/src/eth/homestead_transaction.rs +++ b/src/eth/homestead_transaction.rs @@ -186,14 +186,14 @@ impl EthLegacyHomesteadTxArgs { Ok(stream.out().to_vec()) } - /// Constructs a signed message using legacy homestead transaction args + /// Constructs a signed message using legacy homestead transaction `args` pub fn get_signed_message(&self, from: Address) -> anyhow::Result { let message = self.get_unsigned_message(from)?; let signature = self.signature()?; Ok(SignedMessage { message, signature }) } - /// Constructs an unsigned message using legacy homestead transaction args + /// Constructs an unsigned message using legacy homestead transaction `args` pub fn get_unsigned_message(&self, from: Address) -> anyhow::Result { let method_info = get_filecoin_method_info(&self.to, &self.input)?; Ok(Message { diff --git a/src/f3/mod.rs b/src/f3/mod.rs index 003fbb21016d..f94220187cf2 100644 --- a/src/f3/mod.rs +++ b/src/f3/mod.rs @@ -143,7 +143,7 @@ pub fn import_f3_snapshot( Ok(()) } -/// Whether F3 sidecar via FFI is enabled. +/// Whether `F3` sidecar via FFI is enabled. pub fn is_sidecar_ffi_enabled(chain_config: &ChainConfig) -> bool { // Respect the environment variable when set, and fallback to chain config when not set. 
let enabled = diff --git a/src/health/endpoints.rs b/src/health/endpoints.rs index 1d1a5f3e57a9..3866b66ab684 100644 --- a/src/health/endpoints.rs +++ b/src/health/endpoints.rs @@ -44,7 +44,7 @@ pub(crate) async fn livez( /// - The node is in sync with the network /// - The current epoch of the node is not too far behind the network /// - The RPC server is running if not disabled -/// - The F3 side car is running if enabled +/// - The `F3` side car is running if enabled /// /// If any of these conditions are not met, the nod is **not** ready to serve requests. pub(crate) async fn readyz( diff --git a/src/interpreter/fvm2.rs b/src/interpreter/fvm2.rs index 97f4d627b812..a6304f8ea0f8 100644 --- a/src/interpreter/fvm2.rs +++ b/src/interpreter/fvm2.rs @@ -147,7 +147,7 @@ impl Consensus for ForestExternsV2 { // ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than // instantiating a Tipset to do so (which runs a syntactic check), we do - // it directly on the CIDs. + // it directly on the `CIDs`. // (0) cheap preliminary checks diff --git a/src/interpreter/fvm3.rs b/src/interpreter/fvm3.rs index 595af639cc0e..7badd4a03da0 100644 --- a/src/interpreter/fvm3.rs +++ b/src/interpreter/fvm3.rs @@ -168,7 +168,7 @@ impl Consensus for ForestExterns { // ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than // instantiating a Tipset to do so (which runs a syntactic check), we do - // it directly on the CIDs. + // it directly on the `CIDs`. // (0) cheap preliminary checks diff --git a/src/interpreter/fvm4.rs b/src/interpreter/fvm4.rs index 948095fa112d..68a841038ac9 100644 --- a/src/interpreter/fvm4.rs +++ b/src/interpreter/fvm4.rs @@ -167,7 +167,7 @@ impl Consensus for ForestExterns { // ever have been accepted in a chain is not checked/does not matter here. 
// for that reason when checking block parent relationships, rather than // instantiating a Tipset to do so (which runs a syntactic check), we do - // it directly on the CIDs. + // it directly on the `CIDs`. // (0) cheap preliminary checks diff --git a/src/ipld/util.rs b/src/ipld/util.rs index 5186d6689888..b7563d19bb26 100644 --- a/src/ipld/util.rs +++ b/src/ipld/util.rs @@ -61,9 +61,9 @@ pub fn cancel_export() { } fn should_save_block_to_snapshot(cid: Cid) -> bool { - // Don't include identity CIDs. + // Don't include identity `CIDs`. // We only include raw and dagcbor, for now. - // Raw for "code" CIDs. + // Raw for "code" `CIDs`. if cid.hash().code() == u64::from(MultihashCode::Identity) { false } else { diff --git a/src/libp2p/chain_exchange/message.rs b/src/libp2p/chain_exchange/message.rs index bdd34f1a17c9..19fb7c2b706d 100644 --- a/src/libp2p/chain_exchange/message.rs +++ b/src/libp2p/chain_exchange/message.rs @@ -140,7 +140,7 @@ impl ChainExchangeResponse { .collect() } } -/// Contains all BLS and SECP messages and their indexes per block +/// Contains all BLS and `SECP` messages and their indexes per block #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] pub struct CompactedMessages { /// Unsigned BLS messages. @@ -149,7 +149,7 @@ pub struct CompactedMessages { /// if `bls_msg_includes[2] = vec![5]` then `TipsetBundle.blocks[2]` contains `bls_msgs[5]` pub bls_msg_includes: Vec>, - /// Signed SECP messages. + /// Signed `SECP` messages. pub secp_msgs: Vec, /// Describes which block each message belongs to. pub secp_msg_includes: Vec>, diff --git a/src/libp2p/discovery.rs b/src/libp2p/discovery.rs index 0294886a601a..c3c6b89b0cf4 100644 --- a/src/libp2p/discovery.rs +++ b/src/libp2p/discovery.rs @@ -35,7 +35,7 @@ use crate::{networks::GenesisNetworkName, utils::version::FOREST_VERSION_STRING} pub struct DerivedDiscoveryBehaviour { /// Kademlia discovery. 
kademlia: Toggle>, - /// Kademlia discovery for bootstrapping F3 sidecar when the main Kademlia is disabled. + /// Kademlia discovery for bootstrapping `F3` sidecar when the main Kademlia is disabled. kademlia_f3_sidecar: kad::Behaviour, /// Discovers nodes on the local network. mdns: Toggle, diff --git a/src/libp2p_bitswap/message.rs b/src/libp2p_bitswap/message.rs index cbc2ac3c6489..ca8189c69da1 100644 --- a/src/libp2p_bitswap/message.rs +++ b/src/libp2p_bitswap/message.rs @@ -82,7 +82,7 @@ pub enum BitswapResponse { Block(Vec), } -/// `Bitswap` message Enum type that is either a [`BitswapRequest`] or a +/// `Bitswap` message `Enum` type that is either a [`BitswapRequest`] or a /// [`BitswapResponse`] #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] pub enum BitswapMessage { diff --git a/src/lotus_json/ipld.rs b/src/lotus_json/ipld.rs index 91a6c68c6ab9..6664943ab456 100644 --- a/src/lotus_json/ipld.rs +++ b/src/lotus_json/ipld.rs @@ -20,7 +20,7 @@ //! //! # Tech debt //! - The real way to do this is to implement [`ipld_core::codec::Codec`] bits appropriately, -//! or embrace using our own struct. +//! or embrace using our own `struct`. use std::{collections::BTreeMap, fmt}; diff --git a/src/lotus_json/mod.rs b/src/lotus_json/mod.rs index 6735a6e653f0..ecf6bc639c6e 100644 --- a/src/lotus_json/mod.rs +++ b/src/lotus_json/mod.rs @@ -32,7 +32,7 @@ //! ``` //! //! In rust, the most common serialization framework is [`serde`]. -//! It has ONE (de)serialization model for each struct - the serialization code _cannot_ know +//! It has ONE (de)serialization model for each `struct` - the serialization code _cannot_ know //! if it's writing JSON or CBOR. //! //! The cleanest way handle the distinction would be a serde-compatible trait: @@ -54,7 +54,7 @@ //! [`macro@serde::Serialize`] and [`macro@serde::Deserialize`] instead? //! //! # Lotus JSON in Forest -//! - Have a struct which represents a domain object: e.g [`GossipBlock`](crate::blocks::GossipBlock). 
+//! - Have a `struct` which represents a domain object: e.g [`GossipBlock`](crate::blocks::GossipBlock). //! - Implement [`serde::Serialize`] on that object, normally using [`fvm_ipld_encoding::tuple::Serialize_tuple`]. //! This corresponds to the CBOR representation. //! - Implement [`HasLotusJson`] on the domain object. @@ -92,11 +92,11 @@ //! //! If you require access to private fields, consider: //! - implementing an exhaustive helper method, e.g [`crate::beacon::BeaconEntry::into_parts`]. -//! - moving implementation to the module where the struct is defined, e.g [`crate::blocks::tipset::lotus_json`]. +//! - moving implementation to the module where the `struct` is defined, e.g [`crate::blocks::tipset::lotus_json`]. //! If you do this, you MUST manually add snapshot and `quickcheck` tests. //! -//! ### Compound structs -//! - Each field of a struct should be wrapped with [`LotusJson`]. +//! ### Compound `structs` +//! - Each field of a `struct` should be wrapped with [`LotusJson`]. //! - Implementations of [`HasLotusJson::into_lotus_json`] and [`HasLotusJson::from_lotus_json`] //! should use [`Into`] and [`LotusJson::into_inner`] calls //! - Use destructuring to ensure exhaustiveness @@ -119,7 +119,7 @@ //! //! # Future work //! - use [`proptest`](https://docs.rs/proptest/) to test the parser pipeline -//! - use a derive macro for simple compound structs +//! - use a derive macro for simple compound `structs` use crate::shim::actors::miner::DeadlineInfo; use derive_more::From; @@ -134,7 +134,7 @@ use uuid::Uuid; use {pretty_assertions::assert_eq, quickcheck::quickcheck}; pub trait HasLotusJson: Sized { - /// The struct representing JSON. You should `#[derive(Deserialize, Serialize)]` on it. + /// The `struct` representing JSON. You should `#[derive(Deserialize, Serialize)]` on it. type LotusJson: Serialize + DeserializeOwned; /// To ensure code quality, conversion to/from lotus JSON MUST be tested. 
/// Provide snapshots of the JSON, and the domain type it should serialize to. @@ -471,7 +471,7 @@ pub mod base64_standard { } } -/// MUST NOT be used in any `LotusJson` structs +/// MUST NOT be used in any `LotusJson` `structs` pub fn serialize(value: &T, serializer: S) -> Result where S: Serializer, @@ -480,7 +480,7 @@ where value.clone().into_lotus_json().serialize(serializer) } -/// MUST NOT be used in any `LotusJson` structs. +/// MUST NOT be used in any `LotusJson` `structs`. pub fn deserialize<'de, D, T>(deserializer: D) -> Result where D: Deserializer<'de>, @@ -489,7 +489,7 @@ where Ok(T::from_lotus_json(Deserialize::deserialize(deserializer)?)) } -/// A domain struct that is (de) serialized through its lotus JSON representation. +/// A domain `struct` that is (de) serialized through its lotus JSON representation. #[derive( Debug, Deserialize, From, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Clone, )] diff --git a/src/lotus_json/sector_size.rs b/src/lotus_json/sector_size.rs index 74ad8f5a40a3..96c1fc22653c 100644 --- a/src/lotus_json/sector_size.rs +++ b/src/lotus_json/sector_size.rs @@ -6,7 +6,7 @@ use crate::shim::sector::SectorSize; #[derive(Serialize, Deserialize, JsonSchema)] #[schemars(rename = "SectorSize")] -// This should probably be a JSON Schema `enum` +// This should probably be a JSON Schema enum pub struct SectorSizeLotusJson(#[schemars(with = "u64")] SectorSize); impl HasLotusJson for SectorSize { diff --git a/src/message/signed_message.rs b/src/message/signed_message.rs index 3e0b3f7624e5..9683eb17e9f2 100644 --- a/src/message/signed_message.rs +++ b/src/message/signed_message.rs @@ -56,7 +56,7 @@ impl SignedMessage { self.signature.signature_type() == SignatureType::Bls } - /// Checks if the signed message is a SECP message. + /// Checks if the signed message is a `SECP` message. 
pub fn is_secp256k1(&self) -> bool { self.signature.signature_type() == SignatureType::Secp256k1 } diff --git a/src/rpc/channel.rs b/src/rpc/channel.rs index b7d31db0af9c..7a523831be7b 100644 --- a/src/rpc/channel.rs +++ b/src/rpc/channel.rs @@ -100,7 +100,7 @@ pub struct PendingSubscriptionSink { pub(crate) inner: MethodSink, /// `MethodCallback`. pub(crate) method: &'static str, - /// Shared Mutex of subscriptions for this method. + /// Shared `Mutex` of subscriptions for this method. pub(crate) subscribers: Subscribers, /// ID of the `subscription call` (i.e. not the same as subscription id) which is used /// to reply to subscription method call and must only be used once. diff --git a/src/rpc/error.rs b/src/rpc/error.rs index c3c62cbcedfe..6636f4afe28d 100644 --- a/src/rpc/error.rs +++ b/src/rpc/error.rs @@ -60,7 +60,7 @@ impl ServerError { pub fn known_code(&self) -> ErrorCode { self.inner.code().into() } - /// We are only including this method to get the JSON Schemas for our OpenRPC + /// We are only including this method to get the JSON `Schemas` for our OpenRPC /// machinery pub fn stubbed_for_openrpc() -> Self { Self::new( diff --git a/src/rpc/methods/f3/types.rs b/src/rpc/methods/f3/types.rs index 3672c0b751a3..d0ca18cd767c 100644 --- a/src/rpc/methods/f3/types.rs +++ b/src/rpc/methods/f3/types.rs @@ -28,7 +28,7 @@ use std::{cmp::Ordering, time::Duration}; const MAX_LEASE_INSTANCES: u64 = 5; -/// TipSetKey is the canonically ordered concatenation of the block CIDs in a tipset. +/// TipSetKey is the canonically ordered concatenation of the block `CIDs` in a tipset. 
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, JsonSchema)] pub struct F3TipSetKey( #[schemars(with = "String")] diff --git a/src/rpc/methods/mpool.rs b/src/rpc/methods/mpool.rs index 10ceddab0ca9..d7392a2c211f 100644 --- a/src/rpc/methods/mpool.rs +++ b/src/rpc/methods/mpool.rs @@ -155,7 +155,7 @@ impl RpcMethod<1> for MpoolPush { } } -/// Add a batch of `SignedMessage`s to `mpool`, return message CIDs +/// Add a batch of `SignedMessage`s to `mpool`, return message `CIDs` pub enum MpoolBatchPush {} impl RpcMethod<1> for MpoolBatchPush { const NAME: &'static str = "Filecoin.MpoolBatchPush"; @@ -204,7 +204,7 @@ impl RpcMethod<1> for MpoolPushUntrusted { } } -/// Add a batch of `SignedMessage`s to `mpool`, return message CIDs +/// Add a batch of `SignedMessage`s to `mpool`, return message `CIDs` pub enum MpoolBatchPushUntrusted {} impl RpcMethod<1> for MpoolBatchPushUntrusted { const NAME: &'static str = "Filecoin.MpoolBatchPushUntrusted"; diff --git a/src/rpc/methods/state.rs b/src/rpc/methods/state.rs index 232ceb86497f..5cd98f185d77 100644 --- a/src/rpc/methods/state.rs +++ b/src/rpc/methods/state.rs @@ -1260,7 +1260,7 @@ impl RpcMethod<2> for StateSearchMsgLimited { } } -// Sample CIDs (useful for testing): +// Sample `CIDs` (useful for testing): // Mainnet: // 1,594,681 bafy2bzaceaclaz3jvmbjg3piazaq5dcesoyv26cdpoozlkzdiwnsvdvm2qoqm OhSnap upgrade // 1_960_320 bafy2bzacec43okhmihmnwmgqspyrkuivqtxv75rpymsdbulq6lgsdq2vkwkcg Skyr upgrade @@ -1338,7 +1338,7 @@ impl RpcMethod<2> for StateFetchRoot { // Do a depth-first-search of the IPLD graph (DAG). Nodes that are _not_ present in our database // are fetched in background tasks. If the number of tasks reaches MAX_CONCURRENT_REQUESTS, the // depth-first-search pauses until one of the work tasks returns. 
The memory usage of this - // algorithm is dominated by the set of seen CIDs and the 'dfs' stack is not expected to grow to + // algorithm is dominated by the set of seen `CIDs` and the 'dfs' stack is not expected to grow to // more than 1000 elements (even when walking tens of millions of nodes). let dfs = Arc::new(Mutex::new(vec![Ipld::Link(root_cid)])); let mut to_be_fetched = vec![]; @@ -1348,14 +1348,14 @@ impl RpcMethod<2> for StateFetchRoot { while let Some(ipld) = lock_pop(&dfs) { { let mut dfs_guard = dfs.lock(); - // Scan for unseen CIDs. Available IPLD nodes are pushed to the depth-first-search + // Scan for unseen `CIDs`. Available IPLD nodes are pushed to the depth-first-search // stack, unavailable nodes will be requested in worker tasks. for new_cid in ipld.iter().filter_map(&mut get_ipld_link) { counter += 1; if counter.is_multiple_of(1_000) { // set RUST_LOG=forest::rpc::state_api=debug to enable these printouts. tracing::debug!( - "Graph walk: CIDs: {counter}, Fetched: {fetched}, Failures: {failures}, dfs: {}, Concurrent: {}", + "Graph walk: CIDs: {counter}, Fetched: {fetched}, Failures: {failures}, dfs: {}, Concurrent: {}", dfs_guard.len(), task_set.len() ); @@ -1433,7 +1433,7 @@ impl RpcMethod<2> for StateFetchRoot { } Ok(format!( - "IPLD graph traversed! CIDs: {counter}, fetched: {fetched}, failures: {failures}." + "IPLD graph traversed! CIDs: {counter}, fetched: {fetched}, failures: {failures}." )) } } diff --git a/src/rpc/reflect/mod.rs b/src/rpc/reflect/mod.rs index f03aac0d342c..21165c8c7bb2 100644 --- a/src/rpc/reflect/mod.rs +++ b/src/rpc/reflect/mod.rs @@ -50,7 +50,7 @@ pub type Ctx = Arc>; /// /// Note, an earlier draft of this trait had an additional type parameter for `Ctx` /// for generality.
-/// However, fixing it as [`Ctx<...>`] saves on complexity/confusion for implementors, +/// However, fixing it as [`Ctx<...>`] saves on complexity/confusion for `implementors`, /// at the expense of handler flexibility, which could come back to bite us. /// - All handlers accept the same type. /// - All `Ctx`s must be `Send + Sync + 'static` due to bounds on [`RpcModule`]. diff --git a/src/rpc/registry/actors_reg.rs b/src/rpc/registry/actors_reg.rs index f990a51d4764..5d5c9424df46 100644 --- a/src/rpc/registry/actors_reg.rs +++ b/src/rpc/registry/actors_reg.rs @@ -200,7 +200,7 @@ mod test { fn test_basic_load_and_serialize_actor_state_all_supported_actors() { let db = Arc::new(MemoryDB::default()); - // Test all supported actor types with real CIDs + // Test all supported actor types with real `CIDs` let supported_actors = vec![ (BuiltinActor::Account, "Account"), (BuiltinActor::Cron, "Cron"), diff --git a/src/rpc/types/mod.rs b/src/rpc/types/mod.rs index 6d53a930db23..2c8cecd3e91f 100644 --- a/src/rpc/types/mod.rs +++ b/src/rpc/types/mod.rs @@ -270,7 +270,7 @@ pub struct SectorOnChainInfo { /// sector activation, extension and whenever a sector's `QAP` is changed. This fee is payable for /// the lifetime of the sector and is aggregated in the deadline's `daily_fee` field. /// - /// This field is not included in the serialized form of the struct prior to the activation of + /// This field is not included in the serialized form of the `struct` prior to the activation of /// FIP-0100, and is added as the 16th element of the array after that point only for new sectors /// or sectors that are updated after that point. For old sectors, the value of this field will /// always be zero. diff --git a/src/shim/actors/builtin/miner/mod.rs b/src/shim/actors/builtin/miner/mod.rs index 2de8aec3bf5e..5a7590374088 100644 --- a/src/shim/actors/builtin/miner/mod.rs +++ b/src/shim/actors/builtin/miner/mod.rs @@ -414,7 +414,7 @@ impl State { } } - /// Unclaimed funds. 
Actor balance - (locked funds, precommit deposit, ip requirement) Can go negative if the miner is in IP debt. + /// Unclaimed funds. Actor balance - (locked funds, pre-commit deposit, IP requirement) Can go negative if the miner is in IP debt. pub fn available_balance(&self, balance: &BigInt) -> anyhow::Result { let balance: TokenAmount = TokenAmount::from_atto(balance.clone()); let balance_v3 = from_token_v2_to_v3(&balance); @@ -1349,11 +1349,11 @@ impl From for SectorOnChainInfo { pub struct DeadlineInfo { /// Epoch at which this info was calculated. pub current_epoch: ChainEpoch, - /// First epoch of the proving period (<= CurrentEpoch). + /// First epoch of the proving period (<= `CurrentEpoch`). pub period_start: ChainEpoch, - /// Current deadline index, in [0..WPoStProvingPeriodDeadlines). + /// Current deadline index, in [0..`WPoStProvingPeriodDeadlines`). pub index: u64, - /// First epoch from which a proof may be submitted (>= CurrentEpoch). + /// First epoch from which a proof may be submitted (>= `CurrentEpoch`). pub open: ChainEpoch, /// First epoch from which a proof may no longer be submitted (>= Open). pub close: ChainEpoch, diff --git a/src/shim/crypto.rs b/src/shim/crypto.rs index 5b1ad4536d59..a449a66b057f 100644 --- a/src/shim/crypto.rs +++ b/src/shim/crypto.rs @@ -81,7 +81,7 @@ impl Signature { } } - /// Creates a SECP Signature given the raw bytes. + /// Creates a `SECP` Signature given the raw bytes. 
pub fn new_secp256k1(bytes: Vec) -> Self { Self { sig_type: SignatureType::Secp256k1, @@ -135,7 +135,7 @@ impl Signature { /// Authenticates the message signature using protocol-specific validation: /// - Delegated: Uses the Ethereum message with RLP encoding for signature verification, Verifies message roundtrip integrity - /// - BLS/SECP: Standard signature verification + /// - BLS/`SECP`: Standard signature verification pub fn authenticate_msg( &self, eth_chain_id: EthChainId, diff --git a/src/shim/machine/manifest.rs b/src/shim/machine/manifest.rs index 9a5423c63e02..e311e5816264 100644 --- a/src/shim/machine/manifest.rs +++ b/src/shim/machine/manifest.rs @@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize}; /// This should be the latest enumeration of all builtin actors pub use fil_actors_shared::v11::runtime::builtins::Type as BuiltinActor; -/// A list of [`BuiltinActor`]s to their CIDs +/// A list of [`BuiltinActor`]s to their `CIDs` // Theoretically, this struct could just have fields for all the actors, // acting as a kind of perfect hash map, but performance will be fine as-is // #[derive(Serialize, Deserialize, Debug)] diff --git a/src/shim/version.rs b/src/shim/version.rs index 1c33782f4442..7fd94683348d 100644 --- a/src/shim/version.rs +++ b/src/shim/version.rs @@ -39,7 +39,7 @@ pub struct NetworkVersion(#[schemars(with = "u32")] pub NetworkVersion_latest); lotus_json_with_self!(NetworkVersion); -/// Defines public constants V0, V1, ... for [`NetworkVersion`]. +/// Defines public constants `V0`, `V1`, ... for [`NetworkVersion`]. /// Each constant is mapped to the corresponding [`NetworkVersion_latest`] variant. macro_rules! define_network_versions { ($($version:literal),+ $(,)?) => { diff --git a/src/tool/subcommands/api_cmd.rs b/src/tool/subcommands/api_cmd.rs index 8187f8c03536..40275ca4191d 100644 --- a/src/tool/subcommands/api_cmd.rs +++ b/src/tool/subcommands/api_cmd.rs @@ -219,7 +219,7 @@ pub enum ApiCommands { /// that rely on internal state. 
/// /// Inputs: - /// - `--to`, `--from`: delegated Filecoin (f4) addresses + /// - `--to`, `--from`: delegated Filecoin (`f4`) addresses /// - `--payload`: calldata in hex (accepts optional `0x` prefix) /// - `--topic`: `32‑byte` event topic in hex /// - `--filter`: run only tests that interact with a specific RPC method @@ -237,10 +237,10 @@ pub enum ApiCommands { /// test result: ok. 7 passed; 0 failed; 0 ignored; 0 filtered out /// ``` TestStateful { - /// Test Transaction `to` address (delegated f4) + /// Test Transaction `to` address (delegated `f4`) #[arg(long)] to: Address, - /// Test Transaction `from` address (delegated f4) + /// Test Transaction `from` address (delegated `f4`) #[arg(long)] from: Address, /// Test Transaction hex `payload` diff --git a/src/tool/subcommands/api_cmd/api_compare_tests.rs b/src/tool/subcommands/api_cmd/api_compare_tests.rs index e808862b0e1b..31addb681b1e 100644 --- a/src/tool/subcommands/api_cmd/api_compare_tests.rs +++ b/src/tool/subcommands/api_cmd/api_compare_tests.rs @@ -2027,8 +2027,8 @@ fn f3_tests_with_tipset(tipset: &Tipset) -> anyhow::Result> { ]) } -// Extract tests that use chain-specific data such as block CIDs or message -// CIDs. Right now, only the last `n_tipsets` tipsets are used. +// Extract tests that use chain-specific data such as block `CIDs` or message +// `CIDs`. Right now, only the last `n_tipsets` tipsets are used. fn snapshot_tests( store: Arc, num_tipsets: usize, diff --git a/src/tool/subcommands/api_cmd/stateful_tests.rs b/src/tool/subcommands/api_cmd/stateful_tests.rs index 874df6a47977..242c249a175b 100644 --- a/src/tool/subcommands/api_cmd/stateful_tests.rs +++ b/src/tool/subcommands/api_cmd/stateful_tests.rs @@ -43,7 +43,7 @@ pub struct RpcTestScenario { } impl RpcTestScenario { - /// Create a basic scenario from a simple async closure. + /// Create a basic scenario from a simple `async` closure. 
pub fn basic(run_fn: F) -> Self where F: Fn(Arc) -> Fut + Send + Sync + 'static, diff --git a/src/tool/subcommands/archive_cmd.rs b/src/tool/subcommands/archive_cmd.rs index 789e16b1f44d..d39ffae72a0c 100644 --- a/src/tool/subcommands/archive_cmd.rs +++ b/src/tool/subcommands/archive_cmd.rs @@ -98,9 +98,9 @@ pub enum ArchiveCommands { /// Path to an archive (`.car` or `.car.zst`). snapshot: PathBuf, }, - /// Show FRC-0108 header of a standalone F3 snapshot. + /// Show FRC-0108 header of a standalone `F3` snapshot. F3Header { - /// Path to a standalone F3 snapshot. + /// Path to a standalone `F3` snapshot. snapshot: PathBuf, }, /// Trim a snapshot of the chain and write it to `` @@ -150,12 +150,12 @@ pub enum ArchiveCommands { #[arg(long, default_value_t = false)] force: bool, }, - /// Merge a v1 Filecoin snapshot with an F3 snapshot into a v2 Filecoin snapshot in `.forest.car.zst` format + /// Merge a `v1` Filecoin snapshot with an `F3` snapshot into a `v2` Filecoin snapshot in `.forest.car.zst` format MergeF3 { - /// Path to the v1 Filecoin snapshot + /// Path to the `v1` Filecoin snapshot #[arg(long = "v1")] filecoin_v1: PathBuf, - /// Path to the F3 snapshot + /// Path to the `F3` snapshot #[arg(long)] f3: PathBuf, /// Path to the snapshot output file in `.forest.car.zst` format diff --git a/src/tool/subcommands/shed_cmd.rs b/src/tool/subcommands/shed_cmd.rs index 97b0977e5c70..1e5c2a940ad1 100644 --- a/src/tool/subcommands/shed_cmd.rs +++ b/src/tool/subcommands/shed_cmd.rs @@ -22,7 +22,7 @@ use std::path::PathBuf; #[derive(Subcommand)] pub enum ShedCommands { - /// Enumerate the tipset CIDs for a span of epochs starting at `height` and working backwards. + /// Enumerate the tipset `CIDs` for a span of epochs starting at `height` and working backwards. /// /// Useful for getting blocks to live test an RPC endpoint. 
SummarizeTipsets { diff --git a/src/utils/db/car_stream.rs b/src/utils/db/car_stream.rs index 165f3dbe00be..cbfd41fbd634 100644 --- a/src/utils/db/car_stream.rs +++ b/src/utils/db/car_stream.rs @@ -34,7 +34,7 @@ const MAX_FRAME_LEN: usize = 512 * 1024 * 1024; #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct CarV1Header { - // The roots array must contain one or more CIDs, + // The roots array must contain one or more `CIDs`, // each of which should be present somewhere in the remainder of the CAR. // See pub roots: NonEmpty, @@ -114,7 +114,7 @@ impl CarBlockWrite for T { pin_project! { /// Stream of CAR blocks. If the input data is compressed with zstd, it will /// automatically be decompressed. - /// Note that [`CarStream`] automatically skips the metadata block and F3 data + /// Note that [`CarStream`] automatically skips the metadata block and `F3` data /// block defined in [`FRC-0108`](https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0108.md) pub struct CarStream { #[pin] @@ -133,13 +133,13 @@ fn is_zstd(buf: &[u8]) -> bool { } impl CarStream { - /// Create a stream with automatic but unsafe CARv2 header extraction. + /// Create a stream with automatic but unsafe `CARv2` header extraction. /// - /// Note that if the input is zstd compressed, the CARv2 header extraction + /// Note that if the input is zstd compressed, the `CARv2` header extraction /// is on a best efforts basis. It could fail when `reader.fill_buf()` is insufficient - /// for decoding the first zstd frame, and treat input as CARv1, because this method + /// for decoding the first zstd frame, and treat input as `CARv1`, because this method /// does not require the input to be [`tokio::io::AsyncSeek`]. - /// It's recommended to use [`CarStream::new`] for zstd compressed CARv2 input. + /// It's recommended to use [`CarStream::new`] for zstd compressed `CARv2` input. 
#[allow(dead_code)] pub async fn new_unsafe(mut reader: ReaderT) -> io::Result { let header_v2 = Self::try_decode_header_v2_from_fill_buf(reader.fill_buf().await?) @@ -149,7 +149,7 @@ impl CarStream { Self::new_with_header_v2(reader, header_v2).await } - /// Create a stream with pre-extracted CARv2 header + /// Create a stream with pre-extracted `CARv2` header pub async fn new_with_header_v2( mut reader: ReaderT, header_v2: Option, @@ -237,7 +237,7 @@ impl CarStream { } } - /// Extracts CARv2 header from the input, returns the reader and CARv2 header. + /// Extracts `CARv2` header from the input, returns the reader and `CARv2` header. /// /// Note that position of the input reader has to be reset before calling [`CarStream::new_with_header_v2`]. /// Use [`CarStream::extract_header_v2_and_reset_reader_position`] to automatically reset stream position. @@ -274,13 +274,13 @@ impl CarStream { } impl CarStream { - /// Create a stream with automatic CARv2 header extraction. + /// Create a stream with automatic `CARv2` header extraction. pub async fn new(reader: ReaderT) -> io::Result { let (reader, header_v2) = Self::extract_header_v2_and_reset_reader_position(reader).await?; Self::new_with_header_v2(reader, header_v2).await } - /// Extracts CARv2 header from the input, resets the reader position and returns the reader and CARv2 header. + /// Extracts `CARv2` header from the input, resets the reader position and returns the reader and `CARv2` header. 
pub async fn extract_header_v2_and_reset_reader_position( mut reader: ReaderT, ) -> io::Result<(ReaderT, Option)> { diff --git a/src/utils/db/mod.rs b/src/utils/db/mod.rs index d3a997dad470..4fa80757f0f5 100644 --- a/src/utils/db/mod.rs +++ b/src/utils/db/mod.rs @@ -15,9 +15,9 @@ use multihash_codetable::Code; use serde::ser::Serialize; -/// Extension methods for inserting and retrieving IPLD data with CIDs +/// Extension methods for inserting and retrieving IPLD data with `CIDs` pub trait BlockstoreExt: Blockstore { - /// Batch put CBOR objects into block store and returns vector of CIDs + /// Batch put CBOR objects into block store and returns vector of `CIDs` #[allow(clippy::disallowed_types)] fn bulk_put<'a, S, V>(&self, values: V, code: Code) -> anyhow::Result> where diff --git a/src/utils/encoding/cid_de_cbor.rs b/src/utils/encoding/cid_de_cbor.rs index accb21adc274..6a4a608e9643 100644 --- a/src/utils/encoding/cid_de_cbor.rs +++ b/src/utils/encoding/cid_de_cbor.rs @@ -22,7 +22,7 @@ pub fn extract_cids(cbor_blob: &[u8]) -> anyhow::Result { /// vector of [`Cid`]. struct CidVec(SmallCidVec); -/// [`FilterCids`] traverses an [`ipld_core::ipld::Ipld`] tree, appending [`Cid`]s (and only CIDs) to a single vector. +/// [`FilterCids`] traverses an [`ipld_core::ipld::Ipld`] tree, appending [`Cid`]s (and only `CIDs`) to a single vector. /// This is much faster than constructing an [`ipld_core::ipld::Ipld`] tree and then performing the filtering. struct FilterCids<'a>(&'a mut SmallCidVec); @@ -92,7 +92,7 @@ impl<'de> DeserializeSeed<'de> for FilterCids<'_> { Ok(()) } - // "New-type" structs are only used to de-serialize CIDs. + // "New-type" structs are only used to de-serialize `CIDs`. 
#[inline] fn visit_newtype_struct(self, deserializer: D) -> Result where @@ -104,7 +104,7 @@ impl<'de> DeserializeSeed<'de> for FilterCids<'_> { Ok(()) } - // We don't care about anything else as the CIDs could only be found in "new-type" + // We don't care about anything else as the `CIDs` could only be found in "new-type" // structs. So we visit only lists, maps and said structs. #[inline] fn visit_str(self, _value: &str) -> Result diff --git a/src/utils/encoding/fallback_de_ipld_dagcbor.rs b/src/utils/encoding/fallback_de_ipld_dagcbor.rs index e94d9f600538..de35641ecfb2 100644 --- a/src/utils/encoding/fallback_de_ipld_dagcbor.rs +++ b/src/utils/encoding/fallback_de_ipld_dagcbor.rs @@ -666,7 +666,7 @@ impl<'de, 'a, R: dec::Read<'de>> de::Deserializer<'de> for &'a mut CidDeserializ let byte = peek_one(&mut self.0.reader)?; match dec::if_major(byte) { major::BYTES => { - // CBOR encoded CIDs have a zero byte prefix we have to remove. + // CBOR encoded `CIDs` have a zero byte prefix we have to remove. match >>::decode(&mut self.0.reader)?.0 { Cow::Borrowed(buf) => match buf.split_first() { Some((0, rest)) => visitor.visit_borrowed_bytes(rest), diff --git a/src/utils/io/progress_log.rs b/src/utils/io/progress_log.rs index fab0b3b68ce6..1eeb2561c2cd 100644 --- a/src/utils/io/progress_log.rs +++ b/src/utils/io/progress_log.rs @@ -6,14 +6,14 @@ //! Previously we used progress bars thanks to the [`indicatif`](https://crates.io/crates/indicatif) library but we had a few issues with them: //! - They behaved poorly together with regular logging //! - They were too verbose and printed even for very small tasks (less than 5 seconds) -//! - They were only used when connected to a TTY and not written in log files +//! - They were only used when connected to a `TTY` and not written in log files //! //! This lead us to develop our own logging code. //! This module provides two new types for logging progress that are [`WithProgress`] and [`WithProgressRaw`]. //! 
The main goal of [`WithProgressRaw`] is to maintain a similar API to the previous one from progress bar so we could remove the [`indicatif`](https://crates.io/crates/indicatif) dependency, //! but, gradually, we would like to move to something better and use the [`WithProgress`] type. //! The [`WithProgress`] type will provide a way to wrap user code while handling logging presentation details. -//! [`WithProgress`] is a wrapper that should extend to Iterators, Streams, Read/Write types. Right now it only wraps async reads. +//! [`WithProgress`] is a wrapper that should extend to Iterators, Streams, Read/Write types. Right now it only wraps `async` reads. //! //! # Example //! ``` diff --git a/src/utils/io/writer_checksum.rs b/src/utils/io/writer_checksum.rs index d13952877d29..66408348acab 100644 --- a/src/utils/io/writer_checksum.rs +++ b/src/utils/io/writer_checksum.rs @@ -19,7 +19,7 @@ pin_project! { /// Trait marking the object that is collecting a kind of a checksum. pub trait Checksum { - /// Return the checksum and resets the internal hasher. + /// Return the checksum and resets the internal `hasher`. fn finalize(&mut self) -> std::io::Result>>; } diff --git a/src/utils/misc/env.rs b/src/utils/misc/env.rs index 588723e529e2..f3a0b15dd872 100644 --- a/src/utils/misc/env.rs +++ b/src/utils/misc/env.rs @@ -12,13 +12,13 @@ pub fn env_or_default(key: &str, default: T) -> T { .unwrap_or(default) } -/// Check if the given environment variable is set to truthy value. +/// Check if the given environment variable is set to `truthy` value. /// Returns false if not set. pub fn is_env_truthy(env: &str) -> bool { is_env_set_and_truthy(env).unwrap_or_default() } -/// Check if the given environment variable is set to truthy value. +/// Check if the given environment variable is set to `truthy` value. /// Returns None if not set. 
pub fn is_env_set_and_truthy(env: &str) -> Option { std::env::var(env) diff --git a/src/utils/multihash.rs b/src/utils/multihash.rs index d2f613f8528f..b5bd293704e9 100644 --- a/src/utils/multihash.rs +++ b/src/utils/multihash.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT //! -//! This module back-fills the Identify hasher and code that was removed in `multihash` crate. +//! This module back-fills the Identify `hasher` and code that was removed in `multihash` crate. //! See //! and //! @@ -176,7 +176,7 @@ impl MultihashCode { } } -/// Identity hasher with a maximum size. +/// Identity `hasher` with a maximum size. /// /// # Panics /// diff --git a/src/utils/proofs_api/paramfetch.rs b/src/utils/proofs_api/paramfetch.rs index a43240636c6a..637167d87f40 100644 --- a/src/utils/proofs_api/paramfetch.rs +++ b/src/utils/proofs_api/paramfetch.rs @@ -1,7 +1,7 @@ // Copyright 2019-2025 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT //! This module contains the logic for fetching the proofs parameters from the network. -//! As a general rule, the parameters are first fetched from ChainSafe's Cloudflare R2 bucket, if +//! As a general rule, the parameters are first fetched from ChainSafe's Cloudflare `R2` bucket, if //! that fails (or is overridden by [`PROOFS_ONLY_IPFS_GATEWAY_ENV`]), the IPFS gateway is used as a fallback. //! //! The reason for this is that the IPFS gateway is not as reliable and performant as the centralized solution, which contributed to @@ -34,7 +34,7 @@ use super::parameters::{ /// Default IPFS gateway to use for fetching parameters. /// Set via the [`IPFS_GATEWAY_ENV`] environment variable. const DEFAULT_IPFS_GATEWAY: &str = "https://proofs.filecoin.io/ipfs/"; -/// Domain bound to the Cloudflare R2 bucket. +/// Domain bound to the Cloudflare `R2` bucket. const CLOUDFLARE_PROOF_PARAMETER_DOMAIN: &str = "filecoin-proof-parameters.chainsafe.dev"; /// If set to 1, enforce using the IPFS gateway for fetching parameters. 
@@ -171,7 +171,7 @@ async fn fetch_params_ipfs_gateway(path: &Path, info: &ParameterData) -> anyhow: result } -/// Downloads the parameter file from Cloudflare R2 to the given path. It wraps the [`download_from_cloudflare`] function with a retry and timeout mechanisms. +/// Downloads the parameter file from Cloudflare `R2` to the given path. It wraps the [`download_from_cloudflare`] function with a retry and timeout mechanisms. async fn fetch_params_cloudflare(name: &str, path: &Path) -> anyhow::Result<()> { info!("Fetching param file {name} from Cloudflare R2 {CLOUDFLARE_PROOF_PARAMETER_DOMAIN}"); let result = (|| download_from_cloudflare(name, path)) @@ -190,7 +190,7 @@ async fn fetch_params_cloudflare(name: &str, path: &Path) -> anyhow::Result<()> result } -/// Downloads the parameter file from Cloudflare R2 to the given path. In case of an error, +/// Downloads the parameter file from Cloudflare `R2` to the given path. In case of an error, /// the file is not written to the final path to avoid corrupted files. async fn download_from_cloudflare(name: &str, path: &Path) -> anyhow::Result<()> { let response = global_http_client()