Skip to content

Commit e5d979e

Browse files
authored
Merge branch 'ethereum:master' into gethintegration
2 parents 713d0b1 + 892a661 commit e5d979e

32 files changed

+2701
-314
lines changed

cmd/geth/main.go

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -91,13 +91,7 @@ var (
9191
utils.LogNoHistoryFlag,
9292
utils.LogExportCheckpointsFlag,
9393
utils.StateHistoryFlag,
94-
utils.LightServeFlag, // deprecated
95-
utils.LightIngressFlag, // deprecated
96-
utils.LightEgressFlag, // deprecated
97-
utils.LightMaxPeersFlag, // deprecated
98-
utils.LightNoPruneFlag, // deprecated
9994
utils.LightKDFFlag,
100-
utils.LightNoSyncServeFlag, // deprecated
10195
utils.EthRequiredBlocksFlag,
10296
utils.LegacyWhitelistFlag, // deprecated
10397
utils.CacheFlag,

cmd/utils/flags.go

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1245,28 +1245,6 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
12451245
}
12461246
}
12471247

1248-
// setLes shows the deprecation warnings for LES flags.
1249-
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
1250-
if ctx.IsSet(LightServeFlag.Name) {
1251-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
1252-
}
1253-
if ctx.IsSet(LightIngressFlag.Name) {
1254-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
1255-
}
1256-
if ctx.IsSet(LightEgressFlag.Name) {
1257-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
1258-
}
1259-
if ctx.IsSet(LightMaxPeersFlag.Name) {
1260-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
1261-
}
1262-
if ctx.IsSet(LightNoPruneFlag.Name) {
1263-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
1264-
}
1265-
if ctx.IsSet(LightNoSyncServeFlag.Name) {
1266-
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
1267-
}
1268-
}
1269-
12701248
// MakeDatabaseHandles raises out the number of allowed file handles per process
12711249
// for Geth and returns half of the allowance to assign to the database.
12721250
func MakeDatabaseHandles(max int) int {
@@ -1582,7 +1560,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
15821560
setBlobPool(ctx, &cfg.BlobPool)
15831561
setMiner(ctx, &cfg.Miner)
15841562
setRequiredBlocks(ctx, cfg)
1585-
setLes(ctx, cfg)
15861563

15871564
// Cap the cache allowance and tune the garbage collector
15881565
mem, err := gopsutil.VirtualMemory()

cmd/utils/flags_legacy.go

Lines changed: 0 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,6 @@ var DeprecatedFlags = []cli.Flag{
3939
CacheTrieRejournalFlag,
4040
LegacyDiscoveryV5Flag,
4141
TxLookupLimitFlag,
42-
LightServeFlag,
43-
LightIngressFlag,
44-
LightEgressFlag,
45-
LightMaxPeersFlag,
46-
LightNoPruneFlag,
47-
LightNoSyncServeFlag,
4842
LogBacktraceAtFlag,
4943
LogDebugFlag,
5044
MinerNewPayloadTimeoutFlag,
@@ -88,37 +82,6 @@ var (
8882
Value: ethconfig.Defaults.TransactionHistory,
8983
Category: flags.DeprecatedCategory,
9084
}
91-
// Light server and client settings, Deprecated November 2023
92-
LightServeFlag = &cli.IntFlag{
93-
Name: "light.serve",
94-
Usage: "Maximum percentage of time allowed for serving LES requests (deprecated)",
95-
Category: flags.DeprecatedCategory,
96-
}
97-
LightIngressFlag = &cli.IntFlag{
98-
Name: "light.ingress",
99-
Usage: "Incoming bandwidth limit for serving light clients (deprecated)",
100-
Category: flags.DeprecatedCategory,
101-
}
102-
LightEgressFlag = &cli.IntFlag{
103-
Name: "light.egress",
104-
Usage: "Outgoing bandwidth limit for serving light clients (deprecated)",
105-
Category: flags.DeprecatedCategory,
106-
}
107-
LightMaxPeersFlag = &cli.IntFlag{
108-
Name: "light.maxpeers",
109-
Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
110-
Category: flags.DeprecatedCategory,
111-
}
112-
LightNoPruneFlag = &cli.BoolFlag{
113-
Name: "light.nopruning",
114-
Usage: "Disable ancient light chain data pruning (deprecated)",
115-
Category: flags.DeprecatedCategory,
116-
}
117-
LightNoSyncServeFlag = &cli.BoolFlag{
118-
Name: "light.nosyncserve",
119-
Usage: "Enables serving light clients before syncing (deprecated)",
120-
Category: flags.DeprecatedCategory,
121-
}
12285
// Deprecated November 2023
12386
LogBacktraceAtFlag = &cli.StringFlag{
12487
Name: "log.backtrace",

core/blockchain.go

Lines changed: 39 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -183,8 +183,13 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config {
183183
}
184184
if c.StateScheme == rawdb.PathScheme {
185185
config.PathDB = &pathdb.Config{
186-
StateHistory: c.StateHistory,
187-
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
186+
StateHistory: c.StateHistory,
187+
TrieCleanSize: c.TrieCleanLimit * 1024 * 1024,
188+
StateCleanSize: c.SnapshotLimit * 1024 * 1024,
189+
190+
// TODO(rjl493456442): The write buffer represents the memory limit used
191+
// for flushing both trie data and state data to disk. The config name
192+
// should be updated to eliminate the confusion.
188193
WriteBufferSize: c.TrieDirtyLimit * 1024 * 1024,
189194
}
190195
}
@@ -380,11 +385,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
380385
// Do nothing here until the state syncer picks it up.
381386
log.Info("Genesis state is missing, wait state sync")
382387
} else {
383-
// Head state is missing, before the state recovery, find out the
384-
// disk layer point of snapshot(if it's enabled). Make sure the
385-
// rewound point is lower than disk layer.
388+
// Head state is missing, before the state recovery, find out the disk
389+
// layer point of snapshot(if it's enabled). Make sure the rewound point
390+
// is lower than disk layer.
391+
//
392+
// Note it's unnecessary in path mode which always keep trie data and
393+
// state data consistent.
386394
var diskRoot common.Hash
387-
if bc.cacheConfig.SnapshotLimit > 0 {
395+
if bc.cacheConfig.SnapshotLimit > 0 && bc.cacheConfig.StateScheme == rawdb.HashScheme {
388396
diskRoot = rawdb.ReadSnapshotRoot(bc.db)
389397
}
390398
if diskRoot != (common.Hash{}) {
@@ -457,15 +465,39 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
457465
bc.logger.OnGenesisBlock(bc.genesisBlock, alloc)
458466
}
459467
}
468+
bc.setupSnapshot()
469+
470+
// Rewind the chain in case of an incompatible config upgrade.
471+
if compatErr != nil {
472+
log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
473+
if compatErr.RewindToTime > 0 {
474+
bc.SetHeadWithTimestamp(compatErr.RewindToTime)
475+
} else {
476+
bc.SetHead(compatErr.RewindToBlock)
477+
}
478+
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
479+
}
480+
481+
// Start tx indexer if it's enabled.
482+
if txLookupLimit != nil {
483+
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
484+
}
485+
return bc, nil
486+
}
460487

488+
func (bc *BlockChain) setupSnapshot() {
489+
// Short circuit if the chain is established with path scheme, as the
490+
// state snapshot has been integrated into path database natively.
491+
if bc.cacheConfig.StateScheme == rawdb.PathScheme {
492+
return
493+
}
461494
// Load any existing snapshot, regenerating it if loading failed
462495
if bc.cacheConfig.SnapshotLimit > 0 {
463496
// If the chain was rewound past the snapshot persistent layer (causing
464497
// a recovery block number to be persisted to disk), check if we're still
465498
// in recovery mode and in that case, don't invalidate the snapshot on a
466499
// head mismatch.
467500
var recover bool
468-
469501
head := bc.CurrentBlock()
470502
if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
471503
log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
@@ -482,22 +514,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
482514
// Re-initialize the state database with snapshot
483515
bc.statedb = state.NewDatabase(bc.triedb, bc.snaps)
484516
}
485-
486-
// Rewind the chain in case of an incompatible config upgrade.
487-
if compatErr != nil {
488-
log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
489-
if compatErr.RewindToTime > 0 {
490-
bc.SetHeadWithTimestamp(compatErr.RewindToTime)
491-
} else {
492-
bc.SetHead(compatErr.RewindToBlock)
493-
}
494-
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
495-
}
496-
// Start tx indexer if it's enabled.
497-
if txLookupLimit != nil {
498-
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
499-
}
500-
return bc, nil
501517
}
502518

503519
// empty returns an indicator whether the blockchain is empty.

core/blockchain_repair_test.go

Lines changed: 23 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1791,7 +1791,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
17911791
}
17921792
)
17931793
defer engine.Close()
1794-
if snapshots {
1794+
if snapshots && scheme == rawdb.HashScheme {
17951795
config.SnapshotLimit = 256
17961796
config.SnapshotWait = true
17971797
}
@@ -1820,7 +1820,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s
18201820
if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil {
18211821
t.Fatalf("Failed to flush trie state: %v", err)
18221822
}
1823-
if snapshots {
1823+
if snapshots && scheme == rawdb.HashScheme {
18241824
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
18251825
t.Fatalf("Failed to flatten snapshots: %v", err)
18261826
}
@@ -1952,8 +1952,10 @@ func testIssue23496(t *testing.T, scheme string) {
19521952
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
19531953
t.Fatalf("Failed to import canonical chain start: %v", err)
19541954
}
1955-
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
1956-
t.Fatalf("Failed to flatten snapshots: %v", err)
1955+
if scheme == rawdb.HashScheme {
1956+
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
1957+
t.Fatalf("Failed to flatten snapshots: %v", err)
1958+
}
19571959
}
19581960

19591961
// Insert block B3 and commit the state into disk
@@ -1997,15 +1999,23 @@ func testIssue23496(t *testing.T, scheme string) {
19971999
}
19982000
expHead := uint64(1)
19992001
if scheme == rawdb.PathScheme {
2000-
expHead = uint64(2)
2002+
// The pathdb database makes sure that snapshot and trie are consistent,
2003+
// so only the last block is reverted in case of a crash.
2004+
expHead = uint64(3)
20012005
}
20022006
if head := chain.CurrentBlock(); head.Number.Uint64() != expHead {
20032007
t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead)
20042008
}
2005-
2006-
// Reinsert B2-B4
2007-
if _, err := chain.InsertChain(blocks[1:]); err != nil {
2008-
t.Fatalf("Failed to import canonical chain tail: %v", err)
2009+
if scheme == rawdb.PathScheme {
2010+
// Reinsert B4
2011+
if _, err := chain.InsertChain(blocks[3:]); err != nil {
2012+
t.Fatalf("Failed to import canonical chain tail: %v", err)
2013+
}
2014+
} else {
2015+
// Reinsert B2-B4
2016+
if _, err := chain.InsertChain(blocks[1:]); err != nil {
2017+
t.Fatalf("Failed to import canonical chain tail: %v", err)
2018+
}
20092019
}
20102020
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
20112021
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
@@ -2016,7 +2026,9 @@ func testIssue23496(t *testing.T, scheme string) {
20162026
if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(4) {
20172027
t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(4))
20182028
}
2019-
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
2020-
t.Error("Failed to regenerate the snapshot of known state")
2029+
if scheme == rawdb.HashScheme {
2030+
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
2031+
t.Error("Failed to regenerate the snapshot of known state")
2032+
}
20212033
}
20222034
}

core/blockchain_sethead_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2023,7 +2023,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
20232023
}
20242024
if tt.commitBlock > 0 {
20252025
chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false)
2026-
if snapshots {
2026+
if snapshots && scheme == rawdb.HashScheme {
20272027
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
20282028
t.Fatalf("Failed to flatten snapshots: %v", err)
20292029
}

core/blockchain_snapshot_test.go

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
105105
if basic.commitBlock > 0 && basic.commitBlock == point {
106106
chain.TrieDB().Commit(blocks[point-1].Root(), false)
107107
}
108-
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
108+
if basic.snapshotBlock > 0 && basic.snapshotBlock == point && basic.scheme == rawdb.HashScheme {
109109
// Flushing the entire snap tree into the disk, the
110110
// relevant (a) snapshot root and (b) snapshot generator
111111
// will be persisted atomically.
@@ -149,13 +149,17 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
149149
block := chain.GetBlockByNumber(basic.expSnapshotBottom)
150150
if block == nil {
151151
t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
152-
} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
153-
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
152+
} else if basic.scheme == rawdb.HashScheme {
153+
if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
154+
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
155+
}
154156
}
155157

156158
// Check the snapshot, ensure it's integrated
157-
if err := chain.snaps.Verify(block.Root()); err != nil {
158-
t.Errorf("The disk layer is not integrated %v", err)
159+
if basic.scheme == rawdb.HashScheme {
160+
if err := chain.snaps.Verify(block.Root()); err != nil {
161+
t.Errorf("The disk layer is not integrated %v", err)
162+
}
159163
}
160164
}
161165

@@ -565,12 +569,14 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
565569
//
566570
// Expected head header : C8
567571
// Expected head fast block: C8
568-
// Expected head block : G
569-
// Expected snapshot disk : C4
572+
// Expected head block : G (Hash mode), C6 (Path mode)
573+
// Expected snapshot disk : C4 (Hash mode)
570574
for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
571575
expHead := uint64(0)
572576
if scheme == rawdb.PathScheme {
573-
expHead = uint64(4)
577+
// The pathdb database makes sure that snapshot and trie are consistent,
578+
// so only the last two blocks are reverted in case of a crash.
579+
expHead = uint64(6)
574580
}
575581
test := &crashSnapshotTest{
576582
snapshotTestBasic{

core/state/database.go

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -175,26 +175,27 @@ func NewDatabaseForTesting() *CachingDB {
175175
func (db *CachingDB) Reader(stateRoot common.Hash) (Reader, error) {
176176
var readers []StateReader
177177

178-
// Set up the state snapshot reader if available. This feature
179-
// is optional and may be partially useful if it's not fully
180-
// generated.
181-
if db.snap != nil {
182-
// If standalone state snapshot is available (hash scheme),
183-
// then construct the legacy snap reader.
178+
// Configure the state reader using the standalone snapshot in hash mode.
179+
// This reader offers improved performance but is optional and only
180+
// partially useful if the snapshot is not fully generated.
181+
if db.TrieDB().Scheme() == rawdb.HashScheme && db.snap != nil {
184182
snap := db.snap.Snapshot(stateRoot)
185183
if snap != nil {
186184
readers = append(readers, newFlatReader(snap))
187185
}
188-
} else {
189-
// If standalone state snapshot is not available, try to construct
190-
// the state reader with database.
186+
}
187+
// Configure the state reader using the path database in path mode.
188+
// This reader offers improved performance but is optional and only
189+
// partially useful if the snapshot data in path database is not
190+
// fully generated.
191+
if db.TrieDB().Scheme() == rawdb.PathScheme {
191192
reader, err := db.triedb.StateReader(stateRoot)
192193
if err == nil {
193-
readers = append(readers, newFlatReader(reader)) // state reader is optional
194+
readers = append(readers, newFlatReader(reader))
194195
}
195196
}
196-
// Set up the trie reader, which is expected to always be available
197-
// as the gatekeeper unless the state is corrupted.
197+
// Configure the trie reader, which is expected to be available as the
198+
// gatekeeper unless the state is corrupted.
198199
tr, err := newTrieReader(stateRoot, db.triedb, db.pointCache)
199200
if err != nil {
200201
return nil, err

core/state/snapshot/generate_test.go

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,9 @@ func newHelper(scheme string) *testHelper {
166166
diskdb := rawdb.NewMemoryDatabase()
167167
config := &triedb.Config{}
168168
if scheme == rawdb.PathScheme {
169-
config.PathDB = &pathdb.Config{} // disable caching
169+
config.PathDB = &pathdb.Config{
170+
SnapshotNoBuild: true,
171+
} // disable caching
170172
} else {
171173
config.HashDB = &hashdb.Config{} // disable caching
172174
}

core/state/statedb_test.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -979,7 +979,8 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
979979
)
980980
if scheme == rawdb.PathScheme {
981981
tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
982-
CleanCacheSize: 0,
982+
TrieCleanSize: 0,
983+
StateCleanSize: 0,
983984
WriteBufferSize: 0,
984985
}}) // disable caching
985986
} else {

0 commit comments

Comments
 (0)