Regular info tree updates (#1399) (#1403)
* starting work on info tree updates during execution

* info tree updater in sequencer loop

* logging latest index for info tree updates
# Conflicts:
#	cmd/utils/flags.go
#	eth/ethconfig/config_zkevm.go
#	turbo/cli/default_flags.go
#	turbo/cli/flags_zkevm.go
#	turbo/stages/zk_stages.go
#	zk/stages/stage_l1_info_tree.go
#	zk/stages/stage_l1_sequencer_sync.go
#	zk/stages/stage_sequence_execute_utils.go
hexoscott authored Nov 4, 2024
1 parent b667dbe commit eefe88a
Showing 11 changed files with 364 additions and 240 deletions.
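The diffs below wire a new periodic info tree updater through the node: a zkevm.info-tree-update-interval flag, two new Zk config fields, and an l1infotree.Updater passed into both the RPC and sequencer stage lists. The updater's own implementation (zk/l1infotree) is among the files not shown in this view. As a rough illustration of the behaviour the commit message and flag text describe (poll the L1 for new GER information on a fixed interval, log the latest index seen), here is a minimal self-contained Go sketch; every name in it is an assumption for illustration, not the repository's actual API:

package main

import (
	"context"
	"log"
	"time"
)

// gerSource stands in for the L1 syncer; fetchNewIndices is an assumed method
// returning any info tree indices seen on L1 past fromIndex.
type gerSource interface {
	fetchNewIndices(ctx context.Context, fromIndex uint64) ([]uint64, error)
}

// updater mirrors the idea of l1infotree.Updater: poll L1 on a fixed interval
// and track the highest info tree index seen so far.
type updater struct {
	interval    time.Duration // would come from --zkevm.info-tree-update-interval
	source      gerSource
	latestIndex uint64
}

func (u *updater) run(ctx context.Context) {
	ticker := time.NewTicker(u.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			indices, err := u.source.fetchNewIndices(ctx, u.latestIndex)
			if err != nil {
				log.Printf("info tree update check failed: %v", err)
				continue
			}
			for _, idx := range indices {
				if idx > u.latestIndex {
					u.latestIndex = idx
				}
			}
			// per the commit message: "logging latest index for info tree updates"
			log.Printf("latest info tree index: %d", u.latestIndex)
		}
	}
}

// stubSource returns one new index per poll so the loop has something to log.
type stubSource struct{}

func (stubSource) fetchNewIndices(_ context.Context, from uint64) ([]uint64, error) {
	return []uint64{from + 1}, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	u := &updater{interval: time.Second, source: stubSource{}}
	u.run(ctx)
}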
cmd/utils/flags.go (10 additions, 0 deletions)
@@ -678,6 +678,16 @@ var (
 		Usage: "A comma separated list of batch numbers that are known bad on the L1. These will automatically be marked as bad during L1 recovery",
 		Value: "",
 	}
+	InfoTreeUpdateInterval = cli.DurationFlag{
+		Name:  "zkevm.info-tree-update-interval",
+		Usage: "The interval at which the sequencer checks the L1 for new GER information",
+		Value: 1 * time.Minute,
+	}
+	ACLPrintHistory = cli.IntFlag{
+		Name:  "acl.print-history",
+		Usage: "Number of entries to print from the ACL history on node start up",
+		Value: 10,
+	}
 	DebugTimers = cli.BoolFlag{
 		Name:  "debug.timers",
 		Usage: "Enable debug timers",
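With these flags registered, the polling interval and ACL history count can be overridden at start-up, for example (binary name assumed; the defaults above are 1m and 10):

    cdk-erigon --zkevm.info-tree-update-interval=30s --acl.print-history=20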
eth/backend.go (5 additions, 2 deletions)
@@ -125,6 +125,7 @@ import (
 	"github.com/ledgerwatch/erigon/zk/utils"
 	"github.com/ledgerwatch/erigon/zk/witness"
 	"github.com/ledgerwatch/erigon/zkevm/etherman"
+	"github.com/ledgerwatch/erigon/zk/l1infotree"
 )
 
 // Config contains the configuration options of the ETH protocol.
@@ -871,6 +872,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		cfg.L1HighestBlockType,
 	)
 
+	l1InfoTreeUpdater := l1infotree.NewUpdater(cfg.Zk, l1InfoTreeSyncer)
+
 	if isSequencer {
 		// if we are sequencing transactions, we do the sequencing loop...
 		witnessGenerator := witness.NewGenerator(
@@ -941,11 +944,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 			backend.dataStream,
 			backend.l1Syncer,
 			seqVerSyncer,
-			l1InfoTreeSyncer,
 			l1BlockSyncer,
 			backend.txPool2,
 			backend.txPool2DB,
 			verifier,
+			l1InfoTreeUpdater,
 		)
 
 		backend.syncUnwindOrder = zkStages.ZkSequencerUnwindOrder
@@ -979,9 +982,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 			backend.forkValidator,
 			backend.engine,
 			backend.l1Syncer,
-			l1InfoTreeSyncer,
 			streamClient,
 			backend.dataStream,
+			l1InfoTreeUpdater,
 		)
 
 		backend.syncUnwindOrder = zkStages.ZkUnwindOrder
eth/ethconfig/config_zkevm.go (2 additions, 0 deletions)
@@ -82,6 +82,8 @@ type Zk struct {
 
 	TxPoolRejectSmartContractDeployments bool
 
+	ACLPrintHistory        int
+	InfoTreeUpdateInterval time.Duration
 	BadBatches             []uint64
 }
 
turbo/cli/default_flags.go (4 additions, 0 deletions)
@@ -234,5 +234,9 @@ var DefaultFlags = []cli.Flag{
 	&utils.DisableVirtualCounters,
 	&utils.DAUrl,
 	&utils.VirtualCountersSmtReduction,
+
+
+	&utils.ACLPrintHistory,
+	&utils.InfoTreeUpdateInterval,
 	&utils.BadBatches,
 }
turbo/cli/flags_zkevm.go (3 additions, 0 deletions)
@@ -192,6 +192,9 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 		DataStreamWriteTimeout:      ctx.Duration(utils.DataStreamWriteTimeout.Name),
 		DataStreamInactivityTimeout: ctx.Duration(utils.DataStreamInactivityTimeout.Name),
 		VirtualCountersSmtReduction: ctx.Float64(utils.VirtualCountersSmtReduction.Name),
+
+		ACLPrintHistory:        ctx.Int(utils.ACLPrintHistory.Name),
+		InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name),
 		BadBatches:             badBatches,
 	}

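For reference, this is the standard urfave/cli pattern these files rely on: define the flag once, register it in the flag list, and read the typed value back with ctx.Duration or ctx.Int. A minimal standalone sketch, assuming the cli/v2 API that the flag style above suggests:

package main

import (
	"fmt"
	"time"

	"github.com/urfave/cli/v2"
)

func main() {
	// Defined once (as in cmd/utils/flags.go)...
	interval := &cli.DurationFlag{
		Name:  "zkevm.info-tree-update-interval",
		Usage: "The interval at which the sequencer checks the L1 for new GER information",
		Value: 1 * time.Minute,
	}
	app := &cli.App{
		// ...registered in the flag list (as in turbo/cli/default_flags.go)...
		Flags: []cli.Flag{interval},
		Action: func(ctx *cli.Context) error {
			// ...and read back as a typed value (as in turbo/cli/flags_zkevm.go).
			fmt.Println("update interval:", ctx.Duration(interval.Name))
			return nil
		},
	}
	_ = app.Run([]string{"app", "--zkevm.info-tree-update-interval=30s"})
}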
turbo/stages/zk_stages.go (6 additions, 4 deletions)
@@ -19,6 +19,7 @@ import (
 	zkStages "github.com/ledgerwatch/erigon/zk/stages"
 	"github.com/ledgerwatch/erigon/zk/syncer"
 	"github.com/ledgerwatch/erigon/zk/txpool"
+	"github.com/ledgerwatch/erigon/zk/l1infotree"
 )
 
 // NewDefaultZkStages creates stages for zk syncer (RPC mode)
@@ -33,9 +34,9 @@ func NewDefaultZkStages(ctx context.Context,
 	forkValidator *engineapi.ForkValidator,
 	engine consensus.Engine,
 	l1Syncer *syncer.L1Syncer,
-	l1InfoTreeSyncer *syncer.L1Syncer,
 	datastreamClient zkStages.DatastreamClient,
 	datastreamServer *datastreamer.StreamServer,
+	infoTreeUpdater *l1infotree.Updater,
 ) []*stagedsync.Stage {
 	dirs := cfg.Dirs
 	blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
@@ -47,7 +48,7 @@ func NewDefaultZkStages(ctx context.Context,
 
 	return zkStages.DefaultZkStages(ctx,
 		zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
-		zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer),
+		zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
 		zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk),
 		zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
 		stagedsync.StageCumulativeIndexCfg(db),
@@ -97,11 +98,11 @@ func NewSequencerZkStages(ctx context.Context,
 	datastreamServer *datastreamer.StreamServer,
 	sequencerStageSyncer *syncer.L1Syncer,
 	l1Syncer *syncer.L1Syncer,
-	l1InfoTreeSyncer *syncer.L1Syncer,
 	l1BlockSyncer *syncer.L1Syncer,
 	txPool *txpool.TxPool,
 	txPoolDb kv.RwDB,
 	verifier *legacy_executor_verifier.LegacyExecutorVerifier,
+	infoTreeUpdater *l1infotree.Updater,
 ) []*stagedsync.Stage {
 	dirs := cfg.Dirs
 	blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
@@ -114,7 +115,7 @@ func NewSequencerZkStages(ctx context.Context,
 		stagedsync.StageCumulativeIndexCfg(db),
 		zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
 		zkStages.StageL1SequencerSyncCfg(db, cfg.Zk, sequencerStageSyncer),
-		zkStages.StageL1InfoTreeCfg(db, cfg.Zk, l1InfoTreeSyncer),
+		zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
 		zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer),
 		zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
 		zkStages.StageSequenceBlocksCfg(
@@ -140,6 +141,7 @@ func NewSequencerZkStages(ctx context.Context,
 			txPoolDb,
 			verifier,
 			uint16(cfg.YieldSize),
+			infoTreeUpdater,
 		),
 		stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3, agg),
 		zkStages.StageZkInterHashesCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg, cfg.Zk),
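Every call site above now passes the shared *l1infotree.Updater where a dedicated *syncer.L1Syncer used to go. The StageL1InfoTreeCfg constructor itself lives in zk/stages and is not shown in this view; judging only from the call zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), its shape is presumably something like the following (struct and field names assumed, following Erigon's usual stage-config pattern of a small struct bundling the DB handle, chain config, and stage dependencies):

package stages

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/eth/ethconfig"
	"github.com/ledgerwatch/erigon/zk/l1infotree"
)

// L1InfoTreeCfg is a hypothetical sketch of the stage config; the real
// definition is in zk/stages/stage_l1_info_tree.go, which is not shown here.
type L1InfoTreeCfg struct {
	db      kv.RwDB
	zkCfg   *ethconfig.Zk
	updater *l1infotree.Updater
}

func StageL1InfoTreeCfg(db kv.RwDB, zkCfg *ethconfig.Zk, updater *l1infotree.Updater) L1InfoTreeCfg {
	return L1InfoTreeCfg{db: db, zkCfg: zkCfg, updater: updater}
}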
(Diffs for the remaining 5 changed files were not loaded in this view.)
