29 changes: 17 additions & 12 deletions cmd/devp2p/internal/v5test/discv5tests.go
@@ -52,7 +52,7 @@ func (s *Suite) AllTests() []utesting.Test {
{Name: "Ping", Fn: s.TestPing},
{Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID},
{Name: "PingMultiIP", Fn: s.TestPingMultiIP},
{Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted},
{Name: "HandshakeResend", Fn: s.TestHandshakeResend},
{Name: "TalkRequest", Fn: s.TestTalkRequest},
{Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance},
{Name: "FindnodeResults", Fn: s.TestFindnodeResults},
@@ -158,32 +158,37 @@ the attempt from a different IP.`)
}
}

// TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
// packet instead of a handshake message packet. The remote node should respond with
// another WHOAREYOU challenge for the second packet.
func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
t.Log(`TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
packet instead of a handshake message packet. The remote node should respond with
another WHOAREYOU challenge for the second packet.`)

// TestHandshakeResend starts a handshake, but doesn't finish it and sends a second ordinary message
// packet instead of a handshake message packet. The remote node should repeat the previous WHOAREYOU
// challenge for the first PING.
func (s *Suite) TestHandshakeResend(t *utesting.T) {
conn, l1 := s.listen1(t)
defer conn.close()

// First PING triggers challenge.
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
conn.write(l1, ping, nil)
var challenge1 *v5wire.Whoareyou
switch resp := conn.read(l1).(type) {
case *v5wire.Whoareyou:
challenge1 = resp
t.Logf("got WHOAREYOU for PING")
default:
t.Fatal("expected WHOAREYOU, got", resp)
}

// Send second PING.
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
switch resp := conn.reqresp(l1, ping2).(type) {
case *v5wire.Pong:
checkPong(t, resp, ping2, l1)
conn.write(l1, ping2, nil)
switch resp := conn.read(l1).(type) {
case *v5wire.Whoareyou:
if resp.Nonce != challenge1.Nonce {
t.Fatalf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], challenge1.Nonce[:])
}
if !bytes.Equal(resp.ChallengeData, challenge1.ChallengeData) {
t.Fatalf("wrong ChallengeData in resent WHOAREYOU (want %x)", resp.ChallengeData, challenge1.ChallengeData)
}
resp.Node = conn.remote
default:
t.Fatal("expected WHOAREYOU, got", resp)
}
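As an aside, the challenge comparison above could be expressed as a small standalone helper. This is only an illustrative sketch, not part of this change; the helper name is hypothetical and it assumes the v5wire package and the standard bytes package already imported by this test file.

// sameChallenge reports whether a resent WHOAREYOU repeats the original
// challenge, i.e. both the nonce and the challenge data match.
func sameChallenge(a, b *v5wire.Whoareyou) bool {
	return a.Nonce == b.Nonce && bytes.Equal(a.ChallengeData, b.ChallengeData)
}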
6 changes: 0 additions & 6 deletions cmd/geth/main.go
@@ -91,13 +91,7 @@ var (
utils.LogNoHistoryFlag,
utils.LogExportCheckpointsFlag,
utils.StateHistoryFlag,
utils.LightServeFlag, // deprecated
utils.LightIngressFlag, // deprecated
utils.LightEgressFlag, // deprecated
utils.LightMaxPeersFlag, // deprecated
utils.LightNoPruneFlag, // deprecated
utils.LightKDFFlag,
utils.LightNoSyncServeFlag, // deprecated
utils.EthRequiredBlocksFlag,
utils.LegacyWhitelistFlag, // deprecated
utils.CacheFlag,
23 changes: 0 additions & 23 deletions cmd/utils/flags.go
@@ -1245,28 +1245,6 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
}
}

// setLes shows the deprecation warnings for LES flags.
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(LightServeFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
}
if ctx.IsSet(LightIngressFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
}
if ctx.IsSet(LightEgressFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
}
if ctx.IsSet(LightMaxPeersFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
}
if ctx.IsSet(LightNoPruneFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
}
if ctx.IsSet(LightNoSyncServeFlag.Name) {
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
}
}

// MakeDatabaseHandles raises out the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func MakeDatabaseHandles(max int) int {
@@ -1582,7 +1560,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
setBlobPool(ctx, &cfg.BlobPool)
setMiner(ctx, &cfg.Miner)
setRequiredBlocks(ctx, cfg)
setLes(ctx, cfg)

// Cap the cache allowance and tune the garbage collector
mem, err := gopsutil.VirtualMemory()
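For reference, the removed setLes warnings all followed the same ctx.IsSet plus log.Warn shape that other deprecated flags, such as the whitelist flag, still use elsewhere in this file. A rough, hypothetical sketch of that pattern (the helper name is made up and not part of this change; it assumes the urfave/cli/v2 and go-ethereum log packages imported by flags.go):

// warnIfDeprecated logs a warning when a deprecated flag is still set.
func warnIfDeprecated(ctx *cli.Context, flag cli.Flag, reason string) {
	if name := flag.Names()[0]; ctx.IsSet(name) {
		log.Warn(reason, "flag", name)
	}
}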
37 changes: 0 additions & 37 deletions cmd/utils/flags_legacy.go
@@ -39,12 +39,6 @@ var DeprecatedFlags = []cli.Flag{
CacheTrieRejournalFlag,
LegacyDiscoveryV5Flag,
TxLookupLimitFlag,
LightServeFlag,
LightIngressFlag,
LightEgressFlag,
LightMaxPeersFlag,
LightNoPruneFlag,
LightNoSyncServeFlag,
LogBacktraceAtFlag,
LogDebugFlag,
MinerNewPayloadTimeoutFlag,
@@ -88,37 +82,6 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.DeprecatedCategory,
}
// Light server and client settings, Deprecated November 2023
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (deprecated)",
Category: flags.DeprecatedCategory,
}
LightIngressFlag = &cli.IntFlag{
Name: "light.ingress",
Usage: "Incoming bandwidth limit for serving light clients (deprecated)",
Category: flags.DeprecatedCategory,
}
LightEgressFlag = &cli.IntFlag{
Name: "light.egress",
Usage: "Outgoing bandwidth limit for serving light clients (deprecated)",
Category: flags.DeprecatedCategory,
}
LightMaxPeersFlag = &cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
Category: flags.DeprecatedCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning (deprecated)",
Category: flags.DeprecatedCategory,
}
LightNoSyncServeFlag = &cli.BoolFlag{
Name: "light.nosyncserve",
Usage: "Enables serving light clients before syncing (deprecated)",
Category: flags.DeprecatedCategory,
}
// Deprecated November 2023
LogBacktraceAtFlag = &cli.StringFlag{
Name: "log.backtrace",
62 changes: 39 additions & 23 deletions core/blockchain.go
@@ -183,8 +183,13 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config {
}
if c.StateScheme == rawdb.PathScheme {
config.PathDB = &pathdb.Config{
StateHistory: c.StateHistory,
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
StateHistory: c.StateHistory,
TrieCleanSize: c.TrieCleanLimit * 1024 * 1024,
StateCleanSize: c.SnapshotLimit * 1024 * 1024,

// TODO(rjl493456442): The write buffer represents the memory limit used
// for flushing both trie data and state data to disk. The config name
// should be updated to eliminate the confusion.
WriteBufferSize: c.TrieDirtyLimit * 1024 * 1024,
}
}
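To make the size arithmetic above explicit: the CacheConfig limits are megabyte counts, while the pathdb.Config fields take byte counts. A small, self-contained sketch mirroring the hunk above; the literal values are hypothetical and not taken from this change.

// examplePathDBConfig shows how megabyte-denominated cache limits translate
// into the byte-denominated path database configuration.
func examplePathDBConfig() *pathdb.Config {
	const (
		trieCleanLimitMB = 154 // stands in for CacheConfig.TrieCleanLimit
		snapshotLimitMB  = 102 // stands in for CacheConfig.SnapshotLimit
		trieDirtyLimitMB = 256 // stands in for CacheConfig.TrieDirtyLimit
	)
	return &pathdb.Config{
		StateHistory:    90000,                          // recent blocks whose state history is retained
		TrieCleanSize:   trieCleanLimitMB * 1024 * 1024, // bytes
		StateCleanSize:  snapshotLimitMB * 1024 * 1024,  // bytes
		WriteBufferSize: trieDirtyLimitMB * 1024 * 1024, // bytes
	}
}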
@@ -380,11 +385,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Do nothing here until the state syncer picks it up.
log.Info("Genesis state is missing, wait state sync")
} else {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
// Head state is missing. Before the state recovery, find out the disk
// layer point of the snapshot (if it's enabled) and make sure the rewound
// point is lower than the disk layer.
//
// Note this is unnecessary in path mode, which always keeps trie data and
// state data consistent.
var diskRoot common.Hash
if bc.cacheConfig.SnapshotLimit > 0 {
if bc.cacheConfig.SnapshotLimit > 0 && bc.cacheConfig.StateScheme == rawdb.HashScheme {
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
}
if diskRoot != (common.Hash{}) {
@@ -457,15 +465,39 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
bc.logger.OnGenesisBlock(bc.genesisBlock, alloc)
}
}
bc.setupSnapshot()

// Rewind the chain in case of an incompatible config upgrade.
if compatErr != nil {
log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
if compatErr.RewindToTime > 0 {
bc.SetHeadWithTimestamp(compatErr.RewindToTime)
} else {
bc.SetHead(compatErr.RewindToBlock)
}
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
}

// Start tx indexer if it's enabled.
if txLookupLimit != nil {
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
}
return bc, nil
}

func (bc *BlockChain) setupSnapshot() {
// Short circuit if the chain is established with path scheme, as the
// state snapshot has been integrated into path database natively.
if bc.cacheConfig.StateScheme == rawdb.PathScheme {
return
}
// Load any existing snapshot, regenerating it if loading failed
if bc.cacheConfig.SnapshotLimit > 0 {
// If the chain was rewound past the snapshot persistent layer (causing
// a recovery block number to be persisted to disk), check if we're still
// in recovery mode and in that case, don't invalidate the snapshot on a
// head mismatch.
var recover bool

head := bc.CurrentBlock()
if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
@@ -482,22 +514,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Re-initialize the state database with snapshot
bc.statedb = state.NewDatabase(bc.triedb, bc.snaps)
}

// Rewind the chain in case of an incompatible config upgrade.
if compatErr != nil {
log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
if compatErr.RewindToTime > 0 {
bc.SetHeadWithTimestamp(compatErr.RewindToTime)
} else {
bc.SetHead(compatErr.RewindToBlock)
}
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
}
// Start tx indexer if it's enabled.
if txLookupLimit != nil {
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
}
return bc, nil
}

// empty returns an indicator whether the blockchain is empty.
34 changes: 23 additions & 11 deletions core/blockchain_repair_test.go
@@ -1791,7 +1791,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
}
)
defer engine.Close()
if snapshots {
if snapshots && scheme == rawdb.HashScheme {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
@@ -1820,7 +1820,7 @@
if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil {
t.Fatalf("Failed to flush trie state: %v", err)
}
if snapshots {
if snapshots && scheme == rawdb.HashScheme {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
@@ -1952,8 +1952,10 @@ func testIssue23496(t *testing.T, scheme string) {
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
if scheme == rawdb.HashScheme {
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
}

// Insert block B3 and commit the state into disk
@@ -1997,15 +1999,23 @@
}
expHead := uint64(1)
if scheme == rawdb.PathScheme {
expHead = uint64(2)
// The pathdb database makes sure that snapshot and trie are consistent,
// so only the last block is reverted in case of a crash.
expHead = uint64(3)
}
if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
}

// Reinsert B2-B4
if _, err := chain.InsertChain(blocks[1:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
if scheme == rawdb.PathScheme {
// Reinsert B4
if _, err := chain.InsertChain(blocks[3:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
} else {
// Reinsert B2-B4
if _, err := chain.InsertChain(blocks[1:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
}
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
@@ -2016,7 +2026,9 @@
if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(4))
}
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
t.Error("Failed to regenerate the snapshot of known state")
if scheme == rawdb.HashScheme {
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
t.Error("Failed to regenerate the snapshot of known state")
}
}
}
2 changes: 1 addition & 1 deletion core/blockchain_sethead_test.go
@@ -2023,7 +2023,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
}
if tt.commitBlock > 0 {
chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false)
if snapshots {
if snapshots && scheme == rawdb.HashScheme {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
22 changes: 14 additions & 8 deletions core/blockchain_snapshot_test.go
@@ -105,7 +105,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
if basic.commitBlock > 0 && basic.commitBlock == point {
chain.TrieDB().Commit(blocks[point-1].Root(), false)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
if basic.snapshotBlock > 0 && basic.snapshotBlock == point && basic.scheme == rawdb.HashScheme {
// Flushing the entire snap tree into the disk, the
// relevant (a) snapshot root and (b) snapshot generator
// will be persisted atomically.
@@ -149,13 +149,17 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
block := chain.GetBlockByNumber(basic.expSnapshotBottom)
if block == nil {
t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
} else if basic.scheme == rawdb.HashScheme {
if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
}
}

// Check the snapshot, ensure it's integrated
if err := chain.snaps.Verify(block.Root()); err != nil {
t.Errorf("The disk layer is not integrated %v", err)
if basic.scheme == rawdb.HashScheme {
if err := chain.snaps.Verify(block.Root()); err != nil {
t.Errorf("The disk layer is not integrated %v", err)
}
}
}

@@ -565,12 +569,14 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
//
// Expected head header : C8
// Expected head fast block: C8
// Expected head block : G
// Expected snapshot disk : C4
// Expected head block : G (Hash mode), C6 (Path mode)
// Expected snapshot disk : C4 (Hash mode)
for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
expHead := uint64(0)
if scheme == rawdb.PathScheme {
expHead = uint64(4)
// The pathdb database makes sure that snapshot and trie are consistent,
// so only the last two blocks are reverted in case of a crash.
expHead = uint64(6)
}
test := &crashSnapshotTest{
snapshotTestBasic{