From 428f1ea6eb1032e81ebe159548f27b19df45b0ce Mon Sep 17 00:00:00 2001 From: Petr Hanzl Date: Mon, 18 Dec 2023 18:03:31 +0100 Subject: [PATCH] Refactor stochastic replay. --- cmd/aida-sdb/trace/replay.go | 4 +- cmd/aida-sdb/trace/replay_substate.go | 2 +- cmd/aida-stochastic-sdb/main.go | 2 +- cmd/aida-stochastic-sdb/replay.go | 152 ++++++++++++ cmd/aida-stochastic-sdb/replay_test.go | 185 +++++++++++++++ cmd/aida-stochastic-sdb/stochastic/replay.go | 131 ----------- cmd/util-worldstate/state/dump.go | 4 +- executor/matcher.go | 18 ++ executor/stochastic_provider.go | 48 ++++ stochastic/event_registry.go | 2 +- stochastic/fuzzing_test.go | 14 +- stochastic/generator/random_access.go | 8 +- stochastic/generator/random_access_test.go | 6 +- stochastic/operation.go | 37 +-- stochastic/replay.go | 235 +++++-------------- stochastic/replay_test.go | 16 +- stochastic/visualizer/model.go | 28 +-- stochastic/visualizer/renderer.go | 4 +- tracer/context/context.go | 2 +- utils/config.go | 185 +++++++-------- utils/default_config.go | 181 +++++++------- utils/flags.go | 4 + 22 files changed, 712 insertions(+), 556 deletions(-) create mode 100644 cmd/aida-stochastic-sdb/replay.go create mode 100644 cmd/aida-stochastic-sdb/replay_test.go delete mode 100644 cmd/aida-stochastic-sdb/stochastic/replay.go create mode 100644 executor/stochastic_provider.go diff --git a/cmd/aida-sdb/trace/replay.go b/cmd/aida-sdb/trace/replay.go index af4ab9780..500db4ef4 100644 --- a/cmd/aida-sdb/trace/replay.go +++ b/cmd/aida-sdb/trace/replay.go @@ -53,11 +53,11 @@ type operationProcessor struct { } func (p operationProcessor) Process(state executor.State[[]operation.Operation], ctx *executor.Context) error { - p.runTransaction(uint64(state.Block), state.Data, ctx.State) + p.runTransaction(state.Block, state.Data, ctx.State) return nil } -func (p operationProcessor) runTransaction(block uint64, operations []operation.Operation, stateDb state.StateDB) { +func (p operationProcessor) runTransaction(block int, operations []operation.Operation, stateDb state.StateDB) { for _, op := range operations { operation.Execute(op, stateDb, p.rCtx) if p.cfg.Debug && block >= p.cfg.DebugFrom { diff --git a/cmd/aida-sdb/trace/replay_substate.go b/cmd/aida-sdb/trace/replay_substate.go index ad2814813..e60d10611 100644 --- a/cmd/aida-sdb/trace/replay_substate.go +++ b/cmd/aida-sdb/trace/replay_substate.go @@ -58,7 +58,7 @@ type substateProcessor struct { func (p substateProcessor) Process(state executor.State[*substate.Substate], ctx *executor.Context) error { return p.operationProvider.Run(state.Block, state.Block, func(t executor.TransactionInfo[[]operation.Operation]) error { - p.runTransaction(uint64(state.Block), t.Data, ctx.State) + p.runTransaction(state.Block, t.Data, ctx.State) return nil }) } diff --git a/cmd/aida-stochastic-sdb/main.go b/cmd/aida-stochastic-sdb/main.go index a15e0ee52..f4b65aeb5 100644 --- a/cmd/aida-stochastic-sdb/main.go +++ b/cmd/aida-stochastic-sdb/main.go @@ -19,7 +19,7 @@ func initStochasticApp() *cli.App { &stochastic.StochasticEstimateCommand, &stochastic.StochasticGenerateCommand, &stochastic.StochasticRecordCommand, - &stochastic.StochasticReplayCommand, + &StochasticReplayCommand, &stochastic.StochasticVisualizeCommand, }, } diff --git a/cmd/aida-stochastic-sdb/replay.go b/cmd/aida-stochastic-sdb/replay.go new file mode 100644 index 000000000..62610047b --- /dev/null +++ b/cmd/aida-stochastic-sdb/replay.go @@ -0,0 +1,152 @@ +package main + +import ( + "fmt" + "math/rand" + "time" + + 
"github.com/Fantom-foundation/Aida/executor" + "github.com/Fantom-foundation/Aida/executor/extension/aidadb" + "github.com/Fantom-foundation/Aida/executor/extension/primer" + "github.com/Fantom-foundation/Aida/executor/extension/profiler" + "github.com/Fantom-foundation/Aida/executor/extension/statedb" + "github.com/Fantom-foundation/Aida/executor/extension/tracker" + "github.com/Fantom-foundation/Aida/executor/extension/validator" + "github.com/Fantom-foundation/Aida/logger" + "github.com/Fantom-foundation/Aida/state" + "github.com/Fantom-foundation/Aida/stochastic" + "github.com/Fantom-foundation/Aida/utils" + "github.com/urfave/cli/v2" +) + +// StochasticReplayCommand data structure for the replay app. +var StochasticReplayCommand = cli.Command{ + Action: RunStochasticReplay, + Name: "replay", + Usage: "Simulates StateDB operations using a random generator with realistic distributions", + ArgsUsage: " ", + Flags: []cli.Flag{ + &utils.BalanceRangeFlag, + &utils.CarmenSchemaFlag, + &utils.ContinueOnFailureFlag, + &utils.CpuProfileFlag, + &utils.DebugFromFlag, + &utils.MemoryBreakdownFlag, + &utils.NonceRangeFlag, + &utils.RandomSeedFlag, + &utils.StateDbImplementationFlag, + &utils.StateDbVariantFlag, + &utils.DbTmpFlag, + &utils.StateDbLoggingFlag, + &utils.TraceFileFlag, + &utils.TraceDebugFlag, + &utils.TraceFlag, + &utils.ShadowDbImplementationFlag, + &utils.ShadowDbVariantFlag, + &logger.LogLevelFlag, + }, + Description: ` +The stochastic replay command requires two argument: + + + determines the number of blocks + contains the simulation parameters produced by the stochastic estimator.`, +} + +func RunStochasticReplay(ctx *cli.Context) error { + cfg, err := utils.NewConfig(ctx, utils.BlockRangeArgs) + if err != nil { + return err + } + + if cfg.StochasticSimulationFile == "" { + return fmt.Errorf("you must define path to simulation file (--%v)", utils.StochasticSimulationFileFlag.Name) + } + + simulation, err := stochastic.ReadSimulation(cfg.StochasticSimulationFile) + if err != nil { + return fmt.Errorf("cannot read simulation; %v", err) + } + + rg := rand.New(rand.NewSource(cfg.RandomSeed)) + + simulations, err := executor.OpenSimulations(simulation, ctx, rg) + if err != nil { + return err + } + defer simulations.Close() + + return runStochasticReplay(cfg, simulations, nil, makeStochasticProcessor(cfg, simulation, rg), nil) + +} + +func makeStochasticProcessor(cfg *utils.Config, e *stochastic.EstimationModelJSON, rg *rand.Rand) executor.Processor[stochastic.Data] { + return stochasticProcessor{ + stochastic.CreateState(e, rg, logger.NewLogger(cfg.LogLevel, "Stochastic Processor")), cfg, + } +} + +type stochasticProcessor struct { + *stochastic.State + cfg *utils.Config +} + +func (p stochasticProcessor) Process(state executor.State[stochastic.Data], ctx *executor.Context) error { + if p.cfg.Debug && state.Block >= p.cfg.DebugFrom { + p.EnableDebug() + } + + p.Execute(state.Block, state.Transaction, state.Data, ctx.State) + return nil +} + +func runStochasticReplay( + cfg *utils.Config, + provider executor.Provider[stochastic.Data], + stateDb state.StateDB, + processor executor.Processor[stochastic.Data], + extra []executor.Extension[stochastic.Data], +) error { + // order of extensionList has to be maintained + var extensionList = []executor.Extension[stochastic.Data]{ + profiler.MakeCpuProfiler[stochastic.Data](cfg), + profiler.MakeDiagnosticServer[stochastic.Data](cfg), + } + + if stateDb == nil { + extensionList = append( + extensionList, + 
+			statedb.MakeStateDbManager[stochastic.Data](cfg),
+			tracker.MakeDbLogger[stochastic.Data](cfg),
+		)
+	}
+
+	extensionList = append(extensionList, extra...)
+
+	extensionList = append(extensionList, []executor.Extension[stochastic.Data]{
+		profiler.MakeThreadLocker[stochastic.Data](),
+		aidadb.MakeAidaDbManager[stochastic.Data](cfg),
+		profiler.MakeVirtualMachineStatisticsPrinter[stochastic.Data](cfg),
+		tracker.MakeProgressLogger[stochastic.Data](cfg, 15*time.Second),
+		tracker.MakeErrorLogger[stochastic.Data](cfg),
+		primer.MakeStateDbPrimer[stochastic.Data](cfg),
+		profiler.MakeMemoryUsagePrinter[stochastic.Data](cfg),
+		profiler.MakeMemoryProfiler[stochastic.Data](cfg),
+		validator.MakeStateHashValidator[stochastic.Data](cfg),
+		profiler.MakeOperationProfiler[stochastic.Data](cfg),
+	}...,
+	)
+
+	return executor.NewExecutor(provider, cfg.LogLevel).Run(
+		executor.Params{
+			From:                   int(cfg.First),
+			To:                     int(cfg.Last) + 1,
+			NumWorkers:             1, // stochastic can run only with one worker
+			State:                  stateDb,
+			ParallelismGranularity: executor.BlockLevel,
+		},
+		processor,
+		extensionList,
+	)
+}
diff --git a/cmd/aida-stochastic-sdb/replay_test.go b/cmd/aida-stochastic-sdb/replay_test.go
new file mode 100644
index 000000000..5b54c6f5d
--- /dev/null
+++ b/cmd/aida-stochastic-sdb/replay_test.go
@@ -0,0 +1,185 @@
+package main
+
+import (
+	"math/rand"
+	"strings"
+	"testing"
+
+	"github.com/Fantom-foundation/Aida/executor"
+	"github.com/Fantom-foundation/Aida/state"
+	"github.com/Fantom-foundation/Aida/stochastic"
+	"github.com/Fantom-foundation/Aida/stochastic/generator"
+	"github.com/Fantom-foundation/Aida/utils"
+	"github.com/ethereum/go-ethereum/common"
+	"go.uber.org/mock/gomock"
+)
+
+var simulation = &stochastic.EstimationModelJSON{
+	FileId:           "1",
+	Operations:       []string{},
+	StochasticMatrix: [][]float64{{1.0}, {2.0}},
+	Contracts: stochastic.EstimationStatsJSON{
+		NumKeys:           generator.MinRandomAccessSize,
+		Lambda:            1.1,
+		QueueDistribution: []float64{1.0, 2.0},
+	},
+	Keys: stochastic.EstimationStatsJSON{
+		NumKeys:           generator.MinRandomAccessSize,
+		Lambda:            1.1,
+		QueueDistribution: []float64{1.0, 2.0},
+	},
+	Values: stochastic.EstimationStatsJSON{
+		NumKeys:           generator.MinRandomAccessSize,
+		Lambda:            1.1,
+		QueueDistribution: []float64{1.0, 2.0},
+	},
+	SnapshotLambda: 1,
+}
+
+var rg = rand.New(rand.NewSource(1))
+
+func TestStochasticReplay_AllDbEventsAreIssuedInOrder(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	provider := executor.NewMockProvider[stochastic.Data](ctrl)
+	db := state.NewMockStateDB(ctrl)
+	cfg := &utils.Config{
+		First:             2,
+		Last:              4,
+		ChainID:           utils.MainnetChainID,
+		SkipPriming:       true,
+		ContinueOnFailure: true,
+		LogLevel:          "Critical",
+	}
+
+	// Simulate the execution of four transactions across three blocks.
+	provider.EXPECT().
+		Run(2, 5, gomock.Any()).
+		DoAndReturn(func(_ int, _ int, consumer executor.Consumer[stochastic.Data]) error {
+			// Block 2
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 2, Transaction: 1, Data: existData})
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 2, Transaction: 2, Data: beginTransactionData})
+			// Block 3
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 3, Transaction: 1, Data: beginBlockData})
+			// Block 4
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 4, Transaction: utils.PseudoTx, Data: addBalanceData})
+			return nil
+		})
+
+	// The expectation is that all of those blocks and transactions
+	// are properly opened, prepared, executed, and closed.
+	gomock.InOrder(
+		db.EXPECT().Exist(common.Address{byte(0)}),
+		db.EXPECT().BeginTransaction(uint32(2)),
+		db.EXPECT().BeginBlock(uint64(3)),
+		db.EXPECT().AddBalance(common.Address{byte(0)}, executor.WithBigIntOfAnySize()),
+	)
+
+	// Since we are working with mock transactions, the run logically fails
+	// on 'intrinsic gas too low'. As this test only checks the order of the
+	// db events, we can ignore this error.
+	err := runStochasticReplay(cfg, provider, db, makeStochasticProcessor(cfg, simulation, rg), nil)
+	if err != nil {
+		if strings.Contains(err.Error(), "intrinsic gas too low") {
+			return
+		}
+		t.Fatalf("run failed; %v", err)
+	}
+}
+
+func TestStochasticReplay_AllTransactionsAreProcessedInOrder(t *testing.T) {
+	ctrl := gomock.NewController(t)
+	provider := executor.NewMockProvider[stochastic.Data](ctrl)
+	db := state.NewMockStateDB(ctrl)
+	ext := executor.NewMockExtension[stochastic.Data](ctrl)
+	processor := executor.NewMockProcessor[stochastic.Data](ctrl)
+	cfg := &utils.Config{
+		First:       2,
+		Last:        4,
+		ChainID:     utils.MainnetChainID,
+		LogLevel:    "Critical",
+		SkipPriming: true,
+	}
+
+	// Simulate the execution of four transactions across three blocks.
+	provider.EXPECT().
+		Run(2, 5, gomock.Any()).
+		DoAndReturn(func(_ int, _ int, consumer executor.Consumer[stochastic.Data]) error {
+			// Block 2
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 2, Transaction: 1, Data: stochastic.Data{}})
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 2, Transaction: 2, Data: stochastic.Data{}})
+			// Block 3
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 3, Transaction: 1, Data: stochastic.Data{}})
+			// Block 4
+			consumer(executor.TransactionInfo[stochastic.Data]{Block: 4, Transaction: utils.PseudoTx, Data: stochastic.Data{}})
+			return nil
+		})
+
+	// The expectation is that all of those blocks and transactions
+	// are properly opened, prepared, executed, and closed.
+	// Since we are running in sequential mode with one worker,
+	// all blocks and transactions need to be in order.
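+	// Note: the final PostRun below is expected at block 5, the first block
+	// after the simulated range, because runStochasticReplay executes with
+	// To = cfg.Last + 1.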
+	gomock.InOrder(
+		ext.EXPECT().PreRun(executor.AtBlock[stochastic.Data](2), gomock.Any()),
+
+		// Block 2
+		// Tx 1
+		ext.EXPECT().PreBlock(executor.AtBlock[stochastic.Data](2), gomock.Any()),
+		ext.EXPECT().PreTransaction(executor.AtTransaction[stochastic.Data](2, 1), gomock.Any()),
+		processor.EXPECT().Process(executor.AtTransaction[stochastic.Data](2, 1), gomock.Any()),
+		ext.EXPECT().PostTransaction(executor.AtTransaction[stochastic.Data](2, 1), gomock.Any()),
+		// Tx 2
+		ext.EXPECT().PreTransaction(executor.AtTransaction[stochastic.Data](2, 2), gomock.Any()),
+		processor.EXPECT().Process(executor.AtTransaction[stochastic.Data](2, 2), gomock.Any()),
+		ext.EXPECT().PostTransaction(executor.AtTransaction[stochastic.Data](2, 2), gomock.Any()),
+		ext.EXPECT().PostBlock(executor.AtTransaction[stochastic.Data](2, 2), gomock.Any()),
+
+		// Block 3
+		ext.EXPECT().PreBlock(executor.AtBlock[stochastic.Data](3), gomock.Any()),
+		ext.EXPECT().PreTransaction(executor.AtTransaction[stochastic.Data](3, 1), gomock.Any()),
+		processor.EXPECT().Process(executor.AtTransaction[stochastic.Data](3, 1), gomock.Any()),
+		ext.EXPECT().PostTransaction(executor.AtTransaction[stochastic.Data](3, 1), gomock.Any()),
+		ext.EXPECT().PostBlock(executor.AtTransaction[stochastic.Data](3, 1), gomock.Any()),
+
+		// Block 4
+		ext.EXPECT().PreBlock(executor.AtBlock[stochastic.Data](4), gomock.Any()),
+		ext.EXPECT().PreTransaction(executor.AtTransaction[stochastic.Data](4, utils.PseudoTx), gomock.Any()),
+		processor.EXPECT().Process(executor.AtTransaction[stochastic.Data](4, utils.PseudoTx), gomock.Any()),
+		ext.EXPECT().PostTransaction(executor.AtTransaction[stochastic.Data](4, utils.PseudoTx), gomock.Any()),
+		ext.EXPECT().PostBlock(executor.AtTransaction[stochastic.Data](4, utils.PseudoTx), gomock.Any()),
+
+		ext.EXPECT().PostRun(executor.AtBlock[stochastic.Data](5), gomock.Any(), nil),
+	)
+
+	if err := runStochasticReplay(cfg, provider, db, processor, []executor.Extension[stochastic.Data]{ext}); err != nil {
+		t.Errorf("run failed: %v", err)
+	}
+}
+
+var beginBlockData = stochastic.Data{
+	Operation: stochastic.BeginBlockID,
+	Address:   0,
+	Key:       0,
+	Value:     0,
+}
+
+var beginTransactionData = stochastic.Data{
+	Operation: stochastic.BeginTransactionID,
+	Address:   0,
+	Key:       0,
+	Value:     0,
+}
+
+var existData = stochastic.Data{
+	Operation: stochastic.ExistID,
+	Address:   0,
+	Key:       0,
+	Value:     0,
+}
+
+var addBalanceData = stochastic.Data{
+	Operation: stochastic.AddBalanceID,
+	Address:   0,
+	Key:       0,
+	Value:     1,
+}
diff --git a/cmd/aida-stochastic-sdb/stochastic/replay.go b/cmd/aida-stochastic-sdb/stochastic/replay.go
deleted file mode 100644
index 4203c97d7..000000000
--- a/cmd/aida-stochastic-sdb/stochastic/replay.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package stochastic
-
-import (
-	"fmt"
-	"os"
-	"strconv"
-	"time"
-
-	"github.com/Fantom-foundation/Aida/logger"
-	"github.com/Fantom-foundation/Aida/state/proxy"
-	"github.com/Fantom-foundation/Aida/stochastic"
-	"github.com/Fantom-foundation/Aida/tracer/context"
-	"github.com/Fantom-foundation/Aida/utils"
-	"github.com/urfave/cli/v2"
-)
-
-// StochasticReplayCommand data structure for the replay app.
-var StochasticReplayCommand = cli.Command{
-	Action:    stochasticReplayAction,
-	Name:      "replay",
-	Usage:     "Simulates StateDB operations using a random generator with realistic distributions",
-	ArgsUsage: "<simulation-length> <simulation-file>",
-	Flags: []cli.Flag{
-		&utils.BalanceRangeFlag,
-		&utils.CarmenSchemaFlag,
-		&utils.ContinueOnFailureFlag,
-		&utils.CpuProfileFlag,
-		&utils.DebugFromFlag,
-		&utils.MemoryBreakdownFlag,
-		&utils.NonceRangeFlag,
-		&utils.RandomSeedFlag,
-		&utils.StateDbImplementationFlag,
-		&utils.StateDbVariantFlag,
-		&utils.DbTmpFlag,
-		&utils.StateDbLoggingFlag,
-		&utils.TraceFileFlag,
-		&utils.TraceDebugFlag,
-		&utils.TraceFlag,
-		&utils.ShadowDbImplementationFlag,
-		&utils.ShadowDbVariantFlag,
-		&logger.LogLevelFlag,
-	},
-	Description: `
-The stochastic replay command requires two arguments:
-<simulation-length> <simulation-file>
-
-<simulation-length> determines the number of blocks
-<simulation-file> contains the simulation parameters produced by the stochastic estimator.`,
-}
-
-// stochasticReplayAction implements the replay command. The user provides simulation file and
-// the number of blocks that should be replayed as arguments.
-func stochasticReplayAction(ctx *cli.Context) error {
-	// parse command-line arguments
-	if ctx.Args().Len() != 2 {
-		return fmt.Errorf("missing simulation file and simulation length as parameter")
-	}
-	simLength, perr := strconv.ParseInt(ctx.Args().Get(0), 10, 64)
-	if perr != nil {
-		return fmt.Errorf("simulation length is not an integer; %v", perr)
-	}
-
-	// process configuration
-	cfg, err := utils.NewConfig(ctx, utils.LastBlockArg)
-	if err != nil {
-		return err
-	}
-	if cfg.DbImpl == "memory" {
-		return fmt.Errorf("db-impl memory is not supported")
-	}
-	log := logger.NewLogger(cfg.LogLevel, "Stochastic Replay")
-
-	// start CPU profiling if requested.
-	if err := utils.StartCPUProfile(cfg); err != nil {
-		return err
-	}
-	defer utils.StopCPUProfile(cfg)
-
-	// read simulation file
-	simulation, serr := stochastic.ReadSimulation(ctx.Args().Get(1))
-	if serr != nil {
-		return fmt.Errorf("failed reading simulation; %v", serr)
-	}
-
-	// create a directory for the store to place all its files, and
-	// instantiate the state DB under testing.
-	log.Notice("Create StateDB")
-	db, stateDbDir, err := utils.PrepareStateDB(cfg)
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(stateDbDir)
-
-	// Enable tracing if debug flag is set
-	if cfg.Trace {
-		rCtx, err := context.NewRecord(cfg.TraceFile, uint64(0))
-		if err != nil {
-			return err
-		}
-		defer rCtx.Close()
-		db = proxy.NewRecorderProxy(db, rCtx)
-	}
-
-	// run simulation.
- log.Info("Run simulation") - runErr := stochastic.RunStochasticReplay(db, simulation, int(simLength), cfg, logger.NewLogger(cfg.LogLevel, "Stochastic")) - - // print memory usage after simulation - if cfg.MemoryBreakdown { - if usage := db.GetMemoryUsage(); usage != nil { - log.Noticef("State DB memory usage: %d byte\n%s", usage.UsedBytes, usage.Breakdown) - } else { - log.Info("Utilized storage solution does not support memory breakdowns") - } - } - - // close the DB and print disk usage - start := time.Now() - if err := db.Close(); err != nil { - log.Criticalf("Failed to close database; %v", err) - } - log.Infof("Closing DB took %v", time.Since(start)) - - size, err := utils.GetDirectorySize(stateDbDir) - if err != nil { - return fmt.Errorf("cannot size of state-db (%v); %v", stateDbDir, err) - } - log.Noticef("Final disk usage: %v MiB", float32(size)/float32(1024*1024)) - - return runErr -} diff --git a/cmd/util-worldstate/state/dump.go b/cmd/util-worldstate/state/dump.go index 4dae4802d..433fb4c85 100644 --- a/cmd/util-worldstate/state/dump.go +++ b/cmd/util-worldstate/state/dump.go @@ -26,7 +26,7 @@ import ( // - Balance // - Nonce // - Code (hash + separate storage) -// - Contract Storage +// - Address Storage var CmdDumpState = cli.Command{ Action: dumpState, Name: "dump", @@ -36,7 +36,7 @@ var CmdDumpState = cli.Command{ - Balance - Nonce - Code (separate storage slot is used to store code data) - - Contract Storage`, + - Address Storage`, ArgsUsage: " ", Flags: []cli.Flag{ &utils.OperaDbFlag, diff --git a/executor/matcher.go b/executor/matcher.go index e57206591..599a12bfd 100644 --- a/executor/matcher.go +++ b/executor/matcher.go @@ -3,6 +3,7 @@ package executor import ( "errors" "fmt" + "math/big" "github.com/Fantom-foundation/Aida/state" substate "github.com/Fantom-foundation/Substate" @@ -44,6 +45,12 @@ func Gt(limit float64) gomock.Matcher { return gt{limit} } +// WithBigIntOfAnySize any size of big.Int. 
This is used for stochastic +// testing, where we cannot concretely determine the value +func WithBigIntOfAnySize() gomock.Matcher { + return withBigIntOfAnySize{} +} + // ---------------------------------------------------------------------------- type atBlock[T any] struct { @@ -146,3 +153,14 @@ func (m gt) Matches(value any) bool { func (m gt) String() string { return fmt.Sprintf("greater than %v", m.limit) } + +type withBigIntOfAnySize struct{} + +func (m withBigIntOfAnySize) Matches(value any) bool { + _, ok := value.(*big.Int) + return ok +} + +func (m withBigIntOfAnySize) String() string { + return fmt.Sprintf("must be big.Int") +} diff --git a/executor/stochastic_provider.go b/executor/stochastic_provider.go new file mode 100644 index 000000000..3cc969438 --- /dev/null +++ b/executor/stochastic_provider.go @@ -0,0 +1,48 @@ +package executor + +import ( + "math/rand" + + "github.com/Fantom-foundation/Aida/stochastic" + "github.com/urfave/cli/v2" +) + +func OpenSimulations(e *stochastic.EstimationModelJSON, ctxt *cli.Context, rg *rand.Rand) (res Provider[stochastic.Data], err error) { + return simulationsProvider{ + ctxt: ctxt, + simulation: e, + rg: rg, + }, nil +} + +type simulationsProvider struct { + ctxt *cli.Context + simulation *stochastic.EstimationModelJSON + rg *rand.Rand +} + +func (s simulationsProvider) Run(from int, to int, consumer Consumer[stochastic.Data]) error { + operations, matrix, markovianState := stochastic.GetStochasticMatrix(s.simulation) + block := from + for { + data := stochastic.DecodeOpcode(operations[markovianState]) + if data.Operation == stochastic.BeginBlockID { + block++ + } + if block >= to { + return nil + } + + err := consumer(TransactionInfo[stochastic.Data]{block, markovianState, data}) + if err != nil { + return err + } + + markovianState = stochastic.NextState(s.rg, matrix, markovianState) + } + +} + +func (s simulationsProvider) Close() { + // ignored +} diff --git a/stochastic/event_registry.go b/stochastic/event_registry.go index 7b68eaaf2..27442471d 100644 --- a/stochastic/event_registry.go +++ b/stochastic/event_registry.go @@ -22,7 +22,7 @@ type EventRegistry struct { // Transition frequencies between two subsequent argument-encoded operations transitFreq [numArgOps][numArgOps]uint64 - // Contract-address access statistics + // Address-address access statistics contracts statistics.Access[common.Address] // Storage-key access statistics diff --git a/stochastic/fuzzing_test.go b/stochastic/fuzzing_test.go index a9a776bd6..17729a86e 100644 --- a/stochastic/fuzzing_test.go +++ b/stochastic/fuzzing_test.go @@ -97,27 +97,27 @@ func FuzzStochastic(f *testing.F) { rg := rand.New(fSrc) // create a stochastic state - ss := createState(&cfg, &e, db, rg, logger.NewLogger("INFO", "Fuzzing Stochastic")) + ss := CreateState(&e, rg, logger.NewLogger("INFO", "Fuzzing Stochastic")) // get stochastic matrix - operations, A, state := getStochasticMatrix(&e) + operations, A, state := GetStochasticMatrix(&e) // generate operations/random parameters from fuzzing string for !fSrc.End() { // decode opcode - op, addrCl, keyCl, valueCl := DecodeOpcode(operations[state]) + data := DecodeOpcode(operations[state]) - // execute operation with its argument classes - ss.execute(op, addrCl, keyCl, valueCl) + // Execute operation with its argument classes + ss.Execute(0, 0, data, db) // check for errors - if err := ss.db.Error(); err != nil { + if err = db.Error(); err != nil { f.Errorf("failed fuzzing. 
Error: %v", err) } // transit to next state in Markovian process - state = nextState(rg, A, state) + state = NextState(rg, A, state) } }) } diff --git a/stochastic/generator/random_access.go b/stochastic/generator/random_access.go index 994b42de3..9ff26c767 100644 --- a/stochastic/generator/random_access.go +++ b/stochastic/generator/random_access.go @@ -9,10 +9,10 @@ import ( "github.com/Fantom-foundation/Aida/stochastic/statistics" ) -// minRandomAccessSize must be substantially larger than statistics.QueueLen +// MinRandomAccessSize must be substantially larger than statistics.QueueLen // (Otherwise sampling for arguments with class RandomValueID may // take a very long time and would slow down the simulation.) -const minRandomAccessSize = 10 * statistics.QueueLen +const MinRandomAccessSize = 10 * statistics.QueueLen // RandomAccess data structure for producing random index accesses. type RandomAccess struct { @@ -36,7 +36,7 @@ type RandomAccess struct { // NewAccess creates a new access index. func NewRandomAccess(rg *rand.Rand, numElem int64, lambda float64, qpdf []float64) *RandomAccess { - if numElem < minRandomAccessSize { + if numElem < MinRandomAccessSize { return nil } @@ -120,7 +120,7 @@ func (a *RandomAccess) DeleteIndex(v int64) error { // reduce cardinality by one a.numElem-- - if a.numElem < minRandomAccessSize { + if a.numElem < MinRandomAccessSize { return fmt.Errorf("DeleteIndex: cardinality of set too low") } diff --git a/stochastic/generator/random_access_test.go b/stochastic/generator/random_access_test.go index a687357cf..2e54d4fc6 100644 --- a/stochastic/generator/random_access_test.go +++ b/stochastic/generator/random_access_test.go @@ -66,7 +66,7 @@ func TestRandomAccessSimple(t *testing.T) { qpdf[i] = 1.0 / float64(statistics.QueueLen) } ra = NewRandomAccess(rg, 1000, 5.0, qpdf) - for i := 0; i < minRandomAccessSize; i++ { + for i := 0; i < MinRandomAccessSize; i++ { copy(queue, ra.queue) if idx := ra.NextIndex(statistics.RecentValueID); idx < 1 || idx > ra.numElem || !containsQ(queue, idx-1) { t.Fatalf("index access not in queue") @@ -228,14 +228,14 @@ func TestRandomAccessLimits(t *testing.T) { if idx := ra.NextIndex(statistics.NewValueID); idx != -1 { t.Fatalf("Fails to detect cardinality integer overflow.") } - ra = NewRandomAccess(rg, minRandomAccessSize, 5.0, qpdf) + ra = NewRandomAccess(rg, MinRandomAccessSize, 5.0, qpdf) if err := ra.DeleteIndex(0); err == nil { t.Fatalf("Fails to detect deleting zero element.") } if err := ra.DeleteIndex(1); err == nil { t.Fatalf("Fails to detect depletion of elements.") } - if ra := NewRandomAccess(rg, minRandomAccessSize-1, 5.0, qpdf); ra != nil { + if ra := NewRandomAccess(rg, MinRandomAccessSize-1, 5.0, qpdf); ra != nil { t.Fatalf("Fails to detect low cardinality.") } } diff --git a/stochastic/operation.go b/stochastic/operation.go index 8b3f5478e..696af928c 100644 --- a/stochastic/operation.go +++ b/stochastic/operation.go @@ -176,6 +176,13 @@ var argId = map[byte]int{ 'r': statistics.RandomValueID, } +type Data struct { + Operation int + Address int + Key int + Value int +} + // OpMnemo returns the mnemonic code for an operation. func OpMnemo(op int) string { if op < 0 || op >= NumOps { @@ -185,11 +192,11 @@ func OpMnemo(op int) string { } // checkArgOp checks whether op/argument combination is valid. 
-func checkArgOp(op int, contract int, key int, value int) bool { +func checkArgOp(op int, address int, key int, value int) bool { if op < 0 || op >= NumOps { return false } - if contract < 0 || contract >= statistics.NumClasses { + if address < 0 || address >= statistics.NumClasses { return false } if key < 0 || key >= statistics.NumClasses { @@ -200,19 +207,19 @@ func checkArgOp(op int, contract int, key int, value int) bool { } switch opNumArgs[op] { case 0: - return contract == statistics.NoArgID && + return address == statistics.NoArgID && key == statistics.NoArgID && value == statistics.NoArgID case 1: - return contract != statistics.NoArgID && + return address != statistics.NoArgID && key == statistics.NoArgID && value == statistics.NoArgID case 2: - return contract != statistics.NoArgID && + return address != statistics.NoArgID && key != statistics.NoArgID && value == statistics.NoArgID case 3: - return contract != statistics.NoArgID && + return address != statistics.NoArgID && key != statistics.NoArgID && value != statistics.NoArgID default: @@ -225,8 +232,8 @@ func IsValidArgOp(argop int) bool { if argop < 0 || argop >= numArgOps { return false } - op, contract, key, value := DecodeArgOp(argop) - return checkArgOp(op, contract, key, value) + op, address, key, value := DecodeArgOp(argop) + return checkArgOp(op, address, key, value) } // EncodeArgOp encodes operation and argument classes via Horner's scheme to a single value. @@ -276,7 +283,7 @@ func validateArg(argMnemo byte) bool { } // DecodeOpcode decodes opcode producing the operation id and its argument classes -func DecodeOpcode(opc string) (int, int, int, int) { +func DecodeOpcode(opc string) Data { mnemo := opc[:2] op, ok := opId[mnemo] if !ok { @@ -285,25 +292,25 @@ func DecodeOpcode(opc string) (int, int, int, int) { if len(opc) != 2+opNumArgs[op] { log.Fatalf("DecodeOpcode: wrong opcode length for %v", opc) } - var contract, key, value int + var address, key, value int switch len(opc) - 2 { case 0: - contract, key, value = statistics.NoArgID, statistics.NoArgID, statistics.NoArgID + address, key, value = statistics.NoArgID, statistics.NoArgID, statistics.NoArgID case 1: if !validateArg(opc[2]) { log.Fatalf("DecodeOpcode: wrong argument code") } - contract, key, value = argId[opc[2]], statistics.NoArgID, statistics.NoArgID + address, key, value = argId[opc[2]], statistics.NoArgID, statistics.NoArgID case 2: if !validateArg(opc[2]) || !validateArg(opc[3]) { log.Fatalf("DecodeOpcode: wrong argument code") } - contract, key, value = argId[opc[2]], argId[opc[3]], statistics.NoArgID + address, key, value = argId[opc[2]], argId[opc[3]], statistics.NoArgID case 3: if !validateArg(opc[2]) || !validateArg(opc[3]) || !validateArg(opc[4]) { log.Fatalf("DecodeOpcode: wrong argument code") } - contract, key, value = argId[opc[2]], argId[opc[3]], argId[opc[4]] + address, key, value = argId[opc[2]], argId[opc[3]], argId[opc[4]] } - return op, contract, key, value + return Data{op, address, key, value} } diff --git a/stochastic/replay.go b/stochastic/replay.go index fadaf7347..8d61b7cbe 100644 --- a/stochastic/replay.go +++ b/stochastic/replay.go @@ -2,17 +2,14 @@ package stochastic import ( "encoding/binary" - "fmt" "math/big" "math/rand" - "time" "github.com/Fantom-foundation/Aida/logger" "github.com/Fantom-foundation/Aida/state" "github.com/Fantom-foundation/Aida/stochastic/exponential" "github.com/Fantom-foundation/Aida/stochastic/generator" "github.com/Fantom-foundation/Aida/stochastic/statistics" - 
"github.com/Fantom-foundation/Aida/utils" "github.com/ethereum/go-ethereum/common" ) @@ -28,16 +25,13 @@ const ( FinaliseFlag = true // flag for Finalise() StateDB operation ) -// stochasticState keeps the execution state for the stochastic simulation -type stochasticState struct { - db state.StateDB // StateDB database +// State keeps the execution state for the stochastic simulation +type State struct { contracts *generator.IndirectAccess // index access generator for contracts keys *generator.RandomAccess // index access generator for keys values *generator.RandomAccess // index access generator for values snapshotLambda float64 // lambda parameter for snapshot delta distribution totalTx uint64 // total number of transactions - txNum uint32 // current transaction number - blockNum uint64 // current block number syncPeriodNum uint64 // current sync-period number snapshot []int // stack of active snapshots suicided []int64 // list of suicided accounts @@ -56,8 +50,8 @@ func find[T comparable](a []T, x T) int { return -1 } -// createState creates a stochastic state and primes the StateDB -func createState(cfg *utils.Config, e *EstimationModelJSON, db state.StateDB, rg *rand.Rand, log logger.Logger) *stochasticState { +// CreateState creates a stochastic state and primes the StateDB +func CreateState(e *EstimationModelJSON, rg *rand.Rand, log logger.Logger) *State { // produce random access generators for contract addresses, // storage-keys, and storage addresses. // (NB: Contracts need an indirect access wrapper because @@ -82,7 +76,7 @@ func createState(cfg *utils.Config, e *EstimationModelJSON, db state.StateDB, rg ) // setup state - ss := NewStochasticState(rg, db, contracts, keys, values, e.SnapshotLambda, log) + ss := NewStochasticState(rg, contracts, keys, values, e.SnapshotLambda, log) // create accounts in StateDB ss.prime() @@ -90,8 +84,8 @@ func createState(cfg *utils.Config, e *EstimationModelJSON, db state.StateDB, rg return &ss } -// getStochasticMatrix returns the stochastic matrix with its operations and the initial state -func getStochasticMatrix(e *EstimationModelJSON) ([]string, [][]float64, int) { +// GetStochasticMatrix returns the stochastic matrix with its operations and the initial state +func GetStochasticMatrix(e *EstimationModelJSON) ([]string, [][]float64, int) { operations := e.Operations A := e.StochasticMatrix // and set initial state to BeginSyncPeriod @@ -102,136 +96,16 @@ func getStochasticMatrix(e *EstimationModelJSON) ([]string, [][]float64, int) { return operations, A, state } -// retrieve operations and stochastic matrix from simulation object - -// RunStochasticReplay runs the stochastic simulation for StateDB operations. -// It requires the simulation model and simulation length. The trace-debug flag -// enables/disables the printing of StateDB operations and their arguments on -// the screen. 
-func RunStochasticReplay(db state.StateDB, e *EstimationModelJSON, nBlocks int, cfg *utils.Config, log logger.Logger) error { - var ( - opFrequency [NumOps]uint64 // operation frequency - numOps uint64 // total number of operations - ) - - if db.GetShadowDB() == nil { - log.Notice("No validation with a shadow DB.") - } - log.Noticef("balance range %d", cfg.BalanceRange) - BalanceRange = cfg.BalanceRange - - log.Noticef("nonce range %d", cfg.NonceRange) - NonceRange = cfg.NonceRange - - // random generator - rg := rand.New(rand.NewSource(cfg.RandomSeed)) - log.Noticef("using random seed %d", cfg.RandomSeed) - - // create a stochastic state - ss := createState(cfg, e, db, rg, log) - - // get stochastic matrix - operations, A, state := getStochasticMatrix(e) - - // progress message setup - var ( - start time.Time - sec float64 - lastSec float64 - runErr error - errCount int - ) - - start = time.Now() - sec = time.Since(start).Seconds() - lastSec = time.Since(start).Seconds() - // if block after priming is greater or equal to debug block, enable debug. - if cfg.Debug && ss.blockNum >= cfg.DebugFrom { - ss.enableDebug() - } - - block := 0 - // inclusive range - log.Noticef("Simulation block range: first %v, last %v", ss.blockNum, ss.blockNum+uint64(nBlocks-1)) - for { - - // decode opcode - op, addrCl, keyCl, valueCl := DecodeOpcode(operations[state]) - - // keep track of stats - numOps++ - opFrequency[op]++ - - // execute operation with its argument classes - ss.execute(op, addrCl, keyCl, valueCl) - - // check for end of simulation - if op == EndBlockID { - block++ - if block >= nBlocks { - break - } - // if current block is greater or equal to debug block, enable debug. - if cfg.Debug && !ss.traceDebug && ss.blockNum >= cfg.DebugFrom { - ss.enableDebug() - } - } - - // report progress - sec = time.Since(start).Seconds() - if sec-lastSec >= 15 { - log.Debugf("Elapsed time: %.0f s, at block %v", sec, block) - lastSec = sec - } - - // check for errors - if err := ss.db.Error(); err != nil { - errCount++ - if runErr == nil { - runErr = fmt.Errorf("error: stochastic replay failed.") - } - - runErr = fmt.Errorf("%v\n\tBlock %v Tx %v: %v", runErr, ss.blockNum, ss.txNum, err) - if !cfg.ContinueOnFailure { - break - } - } - - // transit to next state in Markovian process - state = nextState(rg, A, state) - } - - // print progress summary - log.Noticef("Total elapsed time: %.3f s, processed %v blocks", sec, block) - if errCount > 0 { - log.Warningf("%v errors were found", errCount) - } - - // print statistics - log.Noticef("SyncPeriods: %v", ss.syncPeriodNum) - log.Noticef("Blocks: %v", ss.blockNum) - log.Noticef("Transactions: %v", ss.totalTx) - log.Noticef("Operations: %v", numOps) - log.Noticef("Operation Frequencies:") - for op := 0; op < NumOps; op++ { - log.Noticef("\t%v: %v", opText[op], opFrequency[op]) - } - return runErr -} - // NewStochasticState creates a new state for execution StateDB operations -func NewStochasticState(rg *rand.Rand, db state.StateDB, contracts *generator.IndirectAccess, keys *generator.RandomAccess, values *generator.RandomAccess, snapshotLambda float64, log logger.Logger) stochasticState { - +func NewStochasticState(rg *rand.Rand, contracts *generator.IndirectAccess, keys *generator.RandomAccess, values *generator.RandomAccess, snapshotLambda float64, log logger.Logger) State { // return stochastic state - return stochasticState{ - db: db, + return State{ contracts: contracts, keys: keys, values: values, snapshotLambda: snapshotLambda, traceDebug: false, suicided: 
[]int64{}, - blockNum: 1, syncPeriodNum: 1, rg: rg, log: log, @@ -239,79 +113,79 @@ func NewStochasticState(rg *rand.Rand, db state.StateDB, contracts *generator.In } // prime StateDB accounts using account information -func (ss *stochasticState) prime() { - numInitialAccounts := ss.contracts.NumElem() + 1 - ss.log.Notice("Start priming...") - ss.log.Noticef("\tinitializing %v accounts\n", numInitialAccounts) - pt := utils.NewProgressTracker(int(numInitialAccounts), ss.log) - db := ss.db - db.BeginSyncPeriod(0) - db.BeginBlock(0) - db.BeginTransaction(0) - - // initialise accounts in memory with balances greater than zero - for i := int64(0); i <= numInitialAccounts; i++ { - addr := toAddress(i) - db.CreateAccount(addr) - db.AddBalance(addr, big.NewInt(ss.rg.Int63n(BalanceRange))) - pt.PrintProgress() - } - ss.log.Notice("Finalizing...") - db.EndTransaction() - db.EndBlock() - db.EndSyncPeriod() - ss.log.Notice("End priming...") +func (ss *State) prime() { + // todo is priming done in the extension? + //numInitialAccounts := ss.contracts.NumElem() + 1 + //ss.log.Notice("Start priming...") + //ss.log.Noticef("\tinitializing %v accounts\n", numInitialAccounts) + //pt := utils.NewProgressTracker(int(numInitialAccounts), ss.log) + //db := ss.db + //db.BeginSyncPeriod(0) + //db.BeginBlock(0) + //db.BeginTransaction(0) + // + //// initialise accounts in memory with balances greater than zero + //for i := int64(0); i <= numInitialAccounts; i++ { + // addr := toAddress(i) + // db.CreateAccount(addr) + // db.AddBalance(addr, big.NewInt(ss.rg.Int63n(BalanceRange))) + // pt.PrintProgress() + //} + //ss.log.Notice("Finalizing...") + //db.EndTransaction() + //db.EndBlock() + //db.EndSyncPeriod() + //ss.log.Notice("End priming...") } // EnableDebug set traceDebug flag to true, and enable debug message when executing an operation -func (ss *stochasticState) enableDebug() { +func (ss *State) EnableDebug() { ss.traceDebug = true } -// execute StateDB operations on a stochastic state. -func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { +// Execute StateDB operations on a stochastic state. +func (ss *State) Execute(block, transaction int, data Data, db state.StateDB) { var ( addr common.Address key common.Hash value common.Hash - db = ss.db rg = ss.rg ) // fetch indexes from index access generators - addrIdx := ss.contracts.NextIndex(addrCl) - keyIdx := ss.keys.NextIndex(keyCl) - valueIdx := ss.values.NextIndex(valueCl) + addrIdx := ss.contracts.NextIndex(data.Address) + keyIdx := ss.keys.NextIndex(data.Key) + valueIdx := ss.values.NextIndex(data.Value) // convert index to address/hashes - if addrCl != statistics.NoArgID { + if data.Address != statistics.NoArgID { addr = toAddress(addrIdx) } - if keyCl != statistics.NoArgID { + if data.Key != statistics.NoArgID { key = toHash(keyIdx) } - if valueCl != statistics.NoArgID { + if data.Value != statistics.NoArgID { value = toHash(valueIdx) } // print opcode and its arguments if ss.traceDebug { // print operation - ss.log.Infof("opcode:%v (%v)", opText[op], EncodeOpcode(op, addrCl, keyCl, valueCl)) + ss.log.Infof("opcode:%v (%v)", opText[data.Operation], EncodeOpcode(data.Operation, data.Address, data.Key, data.Value)) // print indexes of contract address, storage key, and storage value. 
- if addrCl != statistics.NoArgID { + if data.Address != statistics.NoArgID { ss.log.Infof(" addr-idx: %v", addrIdx) } - if keyCl != statistics.NoArgID { + if data.Key != statistics.NoArgID { ss.log.Infof(" key-idx: %v", keyIdx) } - if valueCl != statistics.NoArgID { + if data.Value != statistics.NoArgID { ss.log.Infof(" value-idx: %v", valueIdx) } } - switch op { + switch data.Operation { case AddBalanceID: value := rg.Int63n(BalanceRange) if ss.traceDebug { @@ -321,10 +195,9 @@ func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { case BeginBlockID: if ss.traceDebug { - ss.log.Infof(" id: %v", ss.blockNum) + ss.log.Infof(" id: %v", block) } - db.BeginBlock(ss.blockNum) - ss.txNum = 0 + db.BeginBlock(uint64(block)) ss.suicided = []int64{} case BeginSyncPeriodID: @@ -335,9 +208,9 @@ func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { case BeginTransactionID: if ss.traceDebug { - ss.log.Infof(" id: %v", ss.txNum) + ss.log.Infof(" id: %v", transaction) } - db.BeginTransaction(ss.txNum) + db.BeginTransaction(uint32(transaction)) ss.snapshot = []int{} ss.suicided = []int64{} @@ -349,7 +222,6 @@ func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { case EndBlockID: db.EndBlock() - ss.blockNum++ ss.deleteAccounts() case EndSyncPeriodID: @@ -358,7 +230,6 @@ func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { case EndTransactionID: db.EndTransaction() - ss.txNum++ ss.totalTx++ case ExistID: @@ -459,8 +330,8 @@ func (ss *stochasticState) execute(op int, addrCl int, keyCl int, valueCl int) { } } -// nextState produces the next state in the Markovian process. -func nextState(rg *rand.Rand, A [][]float64, i int) int { +// NextState produces the next state in the Markovian process. +func NextState(rg *rand.Rand, A [][]float64, i int) int { // Retrieve a random number in [0,1.0). r := rg.Float64() @@ -519,7 +390,7 @@ func toHash(idx int64) common.Hash { } // delete account information when suicide was invoked -func (ss *stochasticState) deleteAccounts() { +func (ss *State) deleteAccounts() { // remove account information when suicide was invoked in the block. 
for _, addrIdx := range ss.suicided { if err := ss.contracts.DeleteIndex(addrIdx); err != nil { diff --git a/stochastic/replay_test.go b/stochastic/replay_test.go index f2f22d4ee..797cb7009 100644 --- a/stochastic/replay_test.go +++ b/stochastic/replay_test.go @@ -15,10 +15,10 @@ func TestDeterministicNextState(t *testing.T) { rg := rand.New(rand.NewSource(999)) var A = [][]float64{{0.0, 1.0}, {1.0, 0.0}} - if nextState(rg, A, 0) != 1 { + if NextState(rg, A, 0) != 1 { t.Fatalf("Illegal state transition (row 0)") } - if nextState(rg, A, 1) != 0 { + if NextState(rg, A, 1) != 0 { t.Fatalf("Illegal state transition (row 1)") } } @@ -33,13 +33,13 @@ func TestDeterministicNextState2(t *testing.T) { {0.0, 0.0, 1.0}, {1.0, 0.0, 0.0}, } - if nextState(rg, A, 0) != 1 { + if NextState(rg, A, 0) != 1 { t.Fatalf("Illegal state transition (row 0)") } - if nextState(rg, A, 1) != 2 { + if NextState(rg, A, 1) != 2 { t.Fatalf("Illegal state transition (row 1)") } - if nextState(rg, A, 2) != 0 { + if NextState(rg, A, 2) != 0 { t.Fatalf("Illegal state transition (row 1)") } } @@ -51,10 +51,10 @@ func TestNextStateFail(t *testing.T) { rg := rand.New(rand.NewSource(999)) var A = [][]float64{{0.0, 0.0}, {math.NaN(), 0.0}} - if nextState(rg, A, 0) != -1 { + if NextState(rg, A, 0) != -1 { t.Fatalf("Could not capture faulty stochastic matrix") } - if nextState(rg, A, 1) != -1 { + if NextState(rg, A, 1) != -1 { t.Fatalf("Could not capture faulty stochastic matrix") } } @@ -78,7 +78,7 @@ func checkMarkovChain(A [][]float64, numSteps int) error { state := 0 for steps := 0; steps < numSteps; steps++ { oldState := state - state = nextState(rg, A, state) + state = NextState(rg, A, state) if state != -1 { counts[state]++ } else { diff --git a/stochastic/visualizer/model.go b/stochastic/visualizer/model.go index 53aca1b22..34cb7fa6b 100644 --- a/stochastic/visualizer/model.go +++ b/stochastic/visualizer/model.go @@ -73,17 +73,17 @@ func (e *EventData) PopulateEventData(d *stochastic.EventRegistryJSON) { // Sort entries of the stationary distribution and populate n := len(d.Operations) stationary, _ := stationary.ComputeDistribution(d.StochasticMatrix) - data := []OpData{} + opData := []OpData{} for i := 0; i < n; i++ { - data = append(data, OpData{ + opData = append(opData, OpData{ label: d.Operations[i], value: stationary[i], }) } - sort.Slice(data, func(i, j int) bool { - return data[i].value < data[j].value + sort.Slice(opData, func(i, j int) bool { + return opData[i].value < opData[j].value }) - e.Stationary = data + e.Stationary = opData // compute average number of operations per transaction @@ -92,14 +92,14 @@ func (e *EventData) PopulateEventData(d *stochastic.EventRegistryJSON) { blockProb := 0.0 syncPeriodProb := 0.0 for i := 0; i < n; i++ { - sop, _, _, _ := stochastic.DecodeOpcode(d.Operations[i]) - if sop == stochastic.BeginTransactionID { + data := stochastic.DecodeOpcode(d.Operations[i]) + if data.Operation == stochastic.BeginTransactionID { txProb = stationary[i] } - if sop == stochastic.BeginBlockID { + if data.Operation == stochastic.BeginBlockID { blockProb = stationary[i] } - if sop == stochastic.BeginSyncPeriodID { + if data.Operation == stochastic.BeginSyncPeriodID { syncPeriodProb = stationary[i] } } @@ -118,7 +118,7 @@ func (e *EventData) PopulateEventData(d *stochastic.EventRegistryJSON) { // sum all versions of an operation and normalize the value with the transaction's probability sum := 0.0 for i := 0; i < n; i++ { - if sop, _, _, _ := stochastic.DecodeOpcode(d.Operations[i]); sop == op { + if 
data := stochastic.DecodeOpcode(d.Operations[i]); data.Operation == op {
 			sum += stationary[i]
 		}
 	}
@@ -145,14 +145,14 @@ func (e *EventData) PopulateEventData(d *stochastic.EventRegistryJSON) {
 
 	// reduce stochastic matrix to a simplified matrix
 	for i := 0; i < n; i++ {
-		iop, _, _, _ := stochastic.DecodeOpcode(d.Operations[i])
+		iData := stochastic.DecodeOpcode(d.Operations[i])
 		for j := 0; j < n; j++ {
-			jop, _, _, _ := stochastic.DecodeOpcode(d.Operations[j])
-			e.SimplifiedMatrix[iop][jop] += d.StochasticMatrix[i][j]
+			jData := stochastic.DecodeOpcode(d.Operations[j])
+			e.SimplifiedMatrix[iData.Operation][jData.Operation] += d.StochasticMatrix[i][j]
 		}
 	}
 
-	// normalize row data after reduction
+	// normalize matrix rows after reduction
 	for i := 0; i < stochastic.NumOps; i++ {
 		sum := 0.0
 		for j := 0; j < stochastic.NumOps; j++ {
diff --git a/stochastic/visualizer/renderer.go b/stochastic/visualizer/renderer.go
index 8de2963fa..31c711cd6 100644
--- a/stochastic/visualizer/renderer.go
+++ b/stochastic/visualizer/renderer.go
@@ -92,7 +92,7 @@ func newCountingChart(title string, subtitle string, lambda float64, ecdf [][2]f
 // renderCounting renders counting statistics.
 func renderCounting(w http.ResponseWriter, r *http.Request) {
 	events := GetEventsData()
-	contracts := newCountingChart("Counting Statistics", "for Contract-Addresses",
+	contracts := newCountingChart("Counting Statistics", "for Addresses",
 		events.Contracts.Lambda,
 		events.Contracts.ECdf,
 		events.Contracts.Cdf)
@@ -174,7 +174,7 @@ func renderQueuing(w http.ResponseWriter, r *http.Request) {
 		Title:    "Queuing Probabilities",
 		Subtitle: "for contract-addresses, storage-keys, and storage-values",
 	}))
-	scatter.AddSeries("Contract", convertQueuingData(events.Contracts.QPdf)).AddSeries("Keys", convertQueuingData(events.Keys.QPdf)).AddSeries("Values", convertQueuingData(events.Values.QPdf))
+	scatter.AddSeries("Address", convertQueuingData(events.Contracts.QPdf)).AddSeries("Keys", convertQueuingData(events.Keys.QPdf)).AddSeries("Values", convertQueuingData(events.Values.QPdf))
 	scatter.Render(w)
 }
diff --git a/tracer/context/context.go b/tracer/context/context.go
index 1f2f57c7e..10fc84228 100644
--- a/tracer/context/context.go
+++ b/tracer/context/context.go
@@ -94,7 +94,7 @@ func (ctx *Record) Close() {
 }
 
 ////////////////////////////////////////////////////////////////
-// Contract methods
+// Address methods
 ////////////////////////////////////////////////////////////////
 
 // EncodeContract encodes a given contract address and returns contract's address.
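Between the refactored provider and the now-exported stochastic.NextState, the heart of the replay is a Markov-chain walk over the estimated stochastic matrix. The following is a minimal, self-contained sketch of the inverse-CDF sampling that NextState performs on one matrix row; the nextState helper below is an illustrative re-implementation rather than the exported API, and the deterministic two-state matrix is borrowed from TestDeterministicNextState in this patch.

package main

import (
	"fmt"
	"math/rand"
)

// nextState draws the successor of state i: it accumulates the transition
// probabilities of row A[i] until the running sum exceeds a uniform random
// draw in [0,1), and returns -1 for malformed rows (e.g., NaN entries or
// probabilities that never reach the drawn value).
func nextState(rg *rand.Rand, A [][]float64, i int) int {
	r := rg.Float64()
	sum := 0.0
	for j := 0; j < len(A[i]); j++ {
		sum += A[i][j]
		if r < sum {
			return j
		}
	}
	return -1
}

func main() {
	rg := rand.New(rand.NewSource(999))
	// Two states that deterministically alternate, as asserted by the tests.
	A := [][]float64{{0.0, 1.0}, {1.0, 0.0}}
	state := 0
	for step := 0; step < 4; step++ {
		state = nextState(rg, A, state)
		fmt.Printf("step %d -> state %d\n", step, state)
	}
}

With row {0.0, 1.0}, the running sum only exceeds the draw at column 1, so the transition is deterministic; this is why the replay tests can assert exact state sequences while real simulations remain randomized through the random seed flag.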
diff --git a/utils/config.go b/utils/config.go index 330d20cdb..bdc1b5add 100644 --- a/utils/config.go +++ b/utils/config.go @@ -105,98 +105,99 @@ type Config struct { First uint64 // first block Last uint64 // last block - AidaDb string // directory to profiling database containing substate, update, delete accounts data - ArchiveMaxQueryAge int // the maximum age for archive queries (in blocks) - ArchiveMode bool // enable archive mode - ArchiveQueryRate int // the queries per second send to the archive - ArchiveVariant string // selects the implementation variant of the archive - BalanceRange int64 // balance range for stochastic simulation/replay - BasicBlockProfiling bool // enable profiling of basic block - BlockLength uint64 // length of a block in number of transactions - CPUProfile string // pprof cpu profile output file name - CPUProfilePerInterval bool // a different CPU profile is taken per 100k block interval - Cache int // Cache for StateDb or Priming - CarmenSchema int // the current DB schema ID to use in Carmen - ChainID ChainID // Blockchain ID (mainnet: 250/testnet: 4002) - ChannelBufferSize int // set a buffer size for profiling channel - CompactDb bool // compact database after merging - ContinueOnFailure bool // continue validation when an error detected - ContractNumber int64 // number of contracts to create - DbComponent string // options for util-db info are 'all', 'substate', 'delete', 'update', 'state-hash' - DbImpl string // storage implementation - DbLogging string // set to true if all DB operations should be logged - DbTmp string // path to temporary database - DbVariant string // database variant - Debug bool // enable trace debug flag - DebugFrom uint64 // the first block to print trace debug - DeleteSourceDbs bool // delete source databases - DeletionDb string // directory of deleted account database - DiagnosticServer int64 // if not zero, the port used for hosting a HTTP server for performance diagnostics - ErrorLogging string // if defined, error logging to file is enabled - Genesis string // genesis file - IncludeStorage bool // represents a flag for contract storage inclusion in an operation - IsExistingStateDb bool // this is true if we are using an existing StateDb - KeepDb bool // set to true if db is kept after run - KeysNumber int64 // number of keys to generate - LogLevel string // level of the logging of the app action - MaxNumErrors int // maximum number of errors when ContinueOnFailure is enabled - MaxNumTransactions int // the maximum number of processed transactions - MemoryBreakdown bool // enable printing of memory breakdown - MemoryProfile string // capture the memory heap profile into the file - MicroProfiling bool // enable micro-profiling of EVM - NoHeartbeatLogging bool // disables heartbeat logging - NonceRange int // nonce range for stochastic simulation/replay - OnlySuccessful bool // only runs transactions that have been successful - OperaBinary string // path to opera binary - OperaDb string // path to opera database - Output string // output directory for aida-db patches or path to events.json file in stochastic generation - PathToStateDb string // Path to a working state-db directory - PrimeRandom bool // enable randomized priming - PrimeThreshold int // set account threshold before commit - Profile bool // enable micro profiling - ProfileBlocks bool // enables block profiler extension - ProfileDB string // profile db for parallel transaction execution - ProfileDepth int // 0 = Interval, 1 = Interval+Block, 2 = 
Interval+Block+Tx
-	ProfileEVMCall         bool           // enable profiling for EVM call
-	ProfileFile            string         // output file containing profiling result
-	ProfileInterval        uint64         // interval of printing profile result
-	ProfileSqlite3         string         // output profiling results to sqlite3 DB
-	ProfilingDbName        string         // set a database name for storing micro-profiling results
-	RandomSeed             int64          // set random seed for stochastic testing
-	RpcRecordingFile       string         // path to source file with recorded RPC requests
-	ShadowDb               bool           // defines we want to open an existing db as shadow
-	ShadowImpl             string         // implementation of the shadow DB to use, empty if disabled
-	ShadowVariant          string         // database variant of the shadow DB to be used
-	SkipMetadata           bool           // skip metadata insert/getting into AidaDb
-	SkipPriming            bool           // skip priming of the state DB
-	SkipStateHashScrapping bool           // if enabled, then state-hashes are not loaded from rpc
-	SnapshotDepth          int            // depth of snapshot history
-	SourceTableName        string         // represents the name of a source DB table
-	SrcDbReadonly          bool           // if false, make a copy the source statedb
-	StateDbSrc             string         // directory to load an existing State DB data
-	StateValidationMode    ValidationMode // state validation mode
-	SubstateDb             string         // substate directory
-	SyncPeriodLength       uint64         // length of a sync-period in number of blocks
-	TargetBlock            uint64         // represents the ID of target block to be reached by state evolve process or in dump state
-	TargetDb               string         // represents the path of a target DB
-	TargetEpoch            uint64         // represents the ID of target epoch to be reached by autogen patch generator
-	Trace                  bool           // trace flag
-	TraceDirectory         string         // name of trace directory
-	TraceFile              string         // name of trace file
-	TrackProgress          bool           // enables track progress logging
-	TransactionLength      uint64         // determines indirectly the length of a transaction
-	TrieRootHash           string         // represents a hash of a state trie root to be decoded
-	UpdateBufferSize       uint64         // cache size in Bytes
-	UpdateDb               string         // update-set directory
-	UpdateOnFailure        bool           // if enabled and continue-on-failure is also enabled, this updates any error found in StateDb
-	UpdateType             string         // download datatype
-	Validate               bool           // validate validate aida-db
-	ValidateStateHashes    bool           // if this is true state hash validation is enabled in Executor
-	ValidateTxState        bool           // validate stateDB before and after transaction
-	ValuesNumber           int64          // number of values to generate
-	VmImpl                 string         // vm implementation (geth/lfvm)
-	Workers                int            // number of worker threads
-	WorldStateDb           string         // path to worldstate
+	AidaDb                   string         // directory to profiling database containing substate, update, delete accounts data
+	ArchiveMaxQueryAge       int            // the maximum age for archive queries (in blocks)
+	ArchiveMode              bool           // enable archive mode
+	ArchiveQueryRate         int            // the queries per second sent to the archive
+	ArchiveVariant           string         // selects the implementation variant of the archive
+	BalanceRange             int64          // balance range for stochastic simulation/replay
+	BasicBlockProfiling      bool           // enable profiling of basic block
+	BlockLength              uint64         // length of a block in number of transactions
+	CPUProfile               string         // pprof cpu profile output file name
+	CPUProfilePerInterval    bool           // a different CPU profile is taken per 100k block interval
+	Cache                    int            // Cache for StateDb or Priming
+	CarmenSchema             int            // the current DB schema ID to use in Carmen
+	ChainID                  ChainID        // Blockchain ID (mainnet: 250/testnet: 4002)
+	ChannelBufferSize        int            // set a buffer size for profiling channel
+	CompactDb                bool           // 
compact database after merging + ContinueOnFailure bool // continue validation when an error detected + ContractNumber int64 // number of contracts to create + DbComponent string // options for util-db info are 'all', 'substate', 'delete', 'update', 'state-hash' + DbImpl string // storage implementation + DbLogging string // set to true if all DB operations should be logged + DbTmp string // path to temporary database + DbVariant string // database variant + Debug bool // enable trace debug flag + DebugFrom int // the first block to print trace debug + DeleteSourceDbs bool // delete source databases + DeletionDb string // directory of deleted account database + DiagnosticServer int64 // if not zero, the port used for hosting a HTTP server for performance diagnostics + ErrorLogging string // if defined, error logging to file is enabled + Genesis string // genesis file + IncludeStorage bool // represents a flag for contract storage inclusion in an operation + IsExistingStateDb bool // this is true if we are using an existing StateDb + KeepDb bool // set to true if db is kept after run + KeysNumber int64 // number of keys to generate + LogLevel string // level of the logging of the app action + MaxNumErrors int // maximum number of errors when ContinueOnFailure is enabled + MaxNumTransactions int // the maximum number of processed transactions + MemoryBreakdown bool // enable printing of memory breakdown + MemoryProfile string // capture the memory heap profile into the file + MicroProfiling bool // enable micro-profiling of EVM + NoHeartbeatLogging bool // disables heartbeat logging + NonceRange int // nonce range for stochastic simulation/replay + OnlySuccessful bool // only runs transactions that have been successful + OperaBinary string // path to opera binary + OperaDb string // path to opera database + Output string // output directory for aida-db patches or path to events.json file in stochastic generation + PathToStateDb string // Path to a working state-db directory + PrimeRandom bool // enable randomized priming + PrimeThreshold int // set account threshold before commit + Profile bool // enable micro profiling + ProfileBlocks bool // enables block profiler extension + ProfileDB string // profile db for parallel transaction execution + ProfileDepth int // 0 = Interval, 1 = Interval+Block, 2 = Interval+Block+Tx + ProfileEVMCall bool // enable profiling for EVM call + ProfileFile string // output file containing profiling result + ProfileInterval uint64 // interval of printing profile result + ProfileSqlite3 string // output profiling results to sqlite3 DB + ProfilingDbName string // set a database name for storing micro-profiling results + RandomSeed int64 // set random seed for stochastic testing + RpcRecordingFile string // path to source file with recorded RPC requests + ShadowDb bool // defines we want to open an existing db as shadow + ShadowImpl string // implementation of the shadow DB to use, empty if disabled + ShadowVariant string // database variant of the shadow DB to be used + SkipMetadata bool // skip metadata insert/getting into AidaDb + SkipPriming bool // skip priming of the state DB + SkipStateHashScrapping bool // if enabled, then state-hashes are not loaded from rpc + SnapshotDepth int // depth of snapshot history + SourceTableName string // represents the name of a source DB table + SrcDbReadonly bool // if false, make a copy the source statedb + StateDbSrc string // directory to load an existing State DB data + StateValidationMode ValidationMode // state 
+ StochasticSimulationFile string // path to stochastic simulation file
+ SubstateDb string // substate directory
+ SyncPeriodLength uint64 // length of a sync-period in number of blocks
+ TargetBlock uint64 // represents the ID of target block to be reached by state evolve process or in dump state
+ TargetDb string // represents the path of a target DB
+ TargetEpoch uint64 // represents the ID of target epoch to be reached by autogen patch generator
+ Trace bool // trace flag
+ TraceDirectory string // name of trace directory
+ TraceFile string // name of trace file
+ TrackProgress bool // enables track progress logging
+ TransactionLength uint64 // determines indirectly the length of a transaction
+ TrieRootHash string // represents a hash of a state trie root to be decoded
+ UpdateBufferSize uint64 // cache size in bytes
+ UpdateDb string // update-set directory
+ UpdateOnFailure bool // if enabled and continue-on-failure is also enabled, this updates any error found in StateDb
+ UpdateType string // download datatype
+ Validate bool // validate aida-db
+ ValidateStateHashes bool // if this is true state hash validation is enabled in Executor
+ ValidateTxState bool // validate stateDB before and after transaction
+ ValuesNumber int64 // number of values to generate
+ VmImpl string // vm implementation (geth/lfvm)
+ Workers int // number of worker threads
+ WorldStateDb string // path to worldstate
 }
 
 type configContext struct {
diff --git a/utils/default_config.go b/utils/default_config.go
index e049478b4..58ce85297 100644
--- a/utils/default_config.go
+++ b/utils/default_config.go
@@ -13,96 +13,97 @@ func createConfigFromFlags(ctx *cli.Context) *Config {
 AppName: ctx.App.HelpName,
 CommandName: ctx.Command.Name,
 
- AidaDb: getFlagValue(ctx, AidaDbFlag).(string),
- ArchiveMaxQueryAge: getFlagValue(ctx, ArchiveMaxQueryAgeFlag).(int),
- ArchiveMode: getFlagValue(ctx, ArchiveModeFlag).(bool),
- ArchiveQueryRate: getFlagValue(ctx, ArchiveQueryRateFlag).(int),
- ArchiveVariant: getFlagValue(ctx, ArchiveVariantFlag).(string),
- BalanceRange: getFlagValue(ctx, BalanceRangeFlag).(int64),
- BasicBlockProfiling: getFlagValue(ctx, BasicBlockProfilingFlag).(bool),
- BlockLength: getFlagValue(ctx, BlockLengthFlag).(uint64),
- CPUProfile: getFlagValue(ctx, CpuProfileFlag).(string),
- CPUProfilePerInterval: getFlagValue(ctx, CpuProfilePerIntervalFlag).(bool),
- Cache: getFlagValue(ctx, CacheFlag).(int),
- CarmenSchema: getFlagValue(ctx, CarmenSchemaFlag).(int),
- ChainID: ChainID(getFlagValue(ctx, ChainIDFlag).(int)),
- ChannelBufferSize: getFlagValue(ctx, ChannelBufferSizeFlag).(int),
- CompactDb: getFlagValue(ctx, CompactDbFlag).(bool),
- ContinueOnFailure: getFlagValue(ctx, ContinueOnFailureFlag).(bool),
- ContractNumber: getFlagValue(ctx, ContractNumberFlag).(int64),
- DbComponent: getFlagValue(ctx, DbComponentFlag).(string),
- DbImpl: getFlagValue(ctx, StateDbImplementationFlag).(string),
- DbLogging: getFlagValue(ctx, StateDbLoggingFlag).(string),
- DbTmp: getFlagValue(ctx, DbTmpFlag).(string),
- DbVariant: getFlagValue(ctx, StateDbVariantFlag).(string),
- Debug: getFlagValue(ctx, TraceDebugFlag).(bool),
- DebugFrom: getFlagValue(ctx, DebugFromFlag).(uint64),
- DeleteSourceDbs: getFlagValue(ctx, DeleteSourceDbsFlag).(bool),
- DeletionDb: getFlagValue(ctx, DeletionDbFlag).(string),
- DiagnosticServer: getFlagValue(ctx, DiagnosticServerFlag).(int64),
- ErrorLogging: getFlagValue(ctx, ErrorLoggingFlag).(string),
- Genesis: getFlagValue(ctx, GenesisFlag).(string),
- IncludeStorage: getFlagValue(ctx, IncludeStorageFlag).(bool),
- KeepDb: getFlagValue(ctx, KeepDbFlag).(bool),
- KeysNumber: getFlagValue(ctx, KeysNumberFlag).(int64),
- LogLevel: getFlagValue(ctx, logger.LogLevelFlag).(string),
- MaxNumErrors: getFlagValue(ctx, MaxNumErrorsFlag).(int),
- MaxNumTransactions: getFlagValue(ctx, MaxNumTransactionsFlag).(int),
- MemoryBreakdown: getFlagValue(ctx, MemoryBreakdownFlag).(bool),
- MemoryProfile: getFlagValue(ctx, MemoryProfileFlag).(string),
- MicroProfiling: getFlagValue(ctx, MicroProfilingFlag).(bool),
- NoHeartbeatLogging: getFlagValue(ctx, NoHeartbeatLoggingFlag).(bool),
- NonceRange: getFlagValue(ctx, NonceRangeFlag).(int),
- OnlySuccessful: getFlagValue(ctx, OnlySuccessfulFlag).(bool),
- OperaBinary: getFlagValue(ctx, OperaBinaryFlag).(string),
- OperaDb: getFlagValue(ctx, OperaDbFlag).(string),
- Output: getFlagValue(ctx, OutputFlag).(string),
- PrimeRandom: getFlagValue(ctx, RandomizePrimingFlag).(bool),
- PrimeThreshold: getFlagValue(ctx, PrimeThresholdFlag).(int),
- Profile: getFlagValue(ctx, ProfileFlag).(bool),
- ProfileBlocks: getFlagValue(ctx, ProfileBlocksFlag).(bool),
- ProfileDB: getFlagValue(ctx, ProfileDBFlag).(string),
- ProfileDepth: getFlagValue(ctx, ProfileDepthFlag).(int),
- ProfileEVMCall: getFlagValue(ctx, ProfileEVMCallFlag).(bool),
- ProfileFile: getFlagValue(ctx, ProfileFileFlag).(string),
- ProfileInterval: getFlagValue(ctx, ProfileIntervalFlag).(uint64),
- ProfileSqlite3: getFlagValue(ctx, ProfileSqlite3Flag).(string),
- ProfilingDbName: getFlagValue(ctx, ProfilingDbNameFlag).(string),
- RandomSeed: getFlagValue(ctx, RandomSeedFlag).(int64),
- RpcRecordingFile: getFlagValue(ctx, RpcRecordingFileFlag).(string),
- ShadowDb: getFlagValue(ctx, ShadowDb).(bool),
- ShadowImpl: getFlagValue(ctx, ShadowDbImplementationFlag).(string),
- ShadowVariant: getFlagValue(ctx, ShadowDbVariantFlag).(string),
- SkipMetadata: getFlagValue(ctx, flags.SkipMetadata).(bool),
- SkipPriming: getFlagValue(ctx, SkipPrimingFlag).(bool),
- SkipStateHashScrapping: getFlagValue(ctx, SkipStateHashScrappingFlag).(bool),
- SnapshotDepth: getFlagValue(ctx, SnapshotDepthFlag).(int),
- SourceTableName: getFlagValue(ctx, SourceTableNameFlag).(string),
- SrcDbReadonly: false,
- StateDbSrc: getFlagValue(ctx, StateDbSrcFlag).(string),
- StateValidationMode: EqualityCheck,
- SubstateDb: getFlagValue(ctx, substate.SubstateDbFlag).(string),
- SyncPeriodLength: getFlagValue(ctx, SyncPeriodLengthFlag).(uint64),
- TargetBlock: getFlagValue(ctx, TargetBlockFlag).(uint64),
- TargetDb: getFlagValue(ctx, TargetDbFlag).(string),
- TargetEpoch: getFlagValue(ctx, TargetEpochFlag).(uint64),
- Trace: getFlagValue(ctx, TraceFlag).(bool),
- TraceDirectory: getFlagValue(ctx, TraceDirectoryFlag).(string),
- TraceFile: getFlagValue(ctx, TraceFileFlag).(string),
- TrackProgress: getFlagValue(ctx, TrackProgressFlag).(bool),
- TransactionLength: getFlagValue(ctx, TransactionLengthFlag).(uint64),
- TrieRootHash: getFlagValue(ctx, TrieRootHashFlag).(string),
- UpdateBufferSize: getFlagValue(ctx, UpdateBufferSizeFlag).(uint64),
- UpdateDb: getFlagValue(ctx, UpdateDbFlag).(string),
- UpdateOnFailure: getFlagValue(ctx, UpdateOnFailure).(bool),
- UpdateType: getFlagValue(ctx, UpdateTypeFlag).(string),
- Validate: getFlagValue(ctx, ValidateFlag).(bool),
- ValidateStateHashes: getFlagValue(ctx, ValidateStateHashesFlag).(bool),
- ValidateTxState: getFlagValue(ctx, ValidateTxStateFlag).(bool),
- ValuesNumber: getFlagValue(ctx, ValuesNumberFlag).(int64),
- VmImpl: getFlagValue(ctx, VmImplementation).(string),
- Workers: getFlagValue(ctx, substate.WorkersFlag).(int),
- WorldStateDb: getFlagValue(ctx, WorldStateFlag).(string),
+ AidaDb: getFlagValue(ctx, AidaDbFlag).(string),
+ ArchiveMaxQueryAge: getFlagValue(ctx, ArchiveMaxQueryAgeFlag).(int),
+ ArchiveMode: getFlagValue(ctx, ArchiveModeFlag).(bool),
+ ArchiveQueryRate: getFlagValue(ctx, ArchiveQueryRateFlag).(int),
+ ArchiveVariant: getFlagValue(ctx, ArchiveVariantFlag).(string),
+ BalanceRange: getFlagValue(ctx, BalanceRangeFlag).(int64),
+ BasicBlockProfiling: getFlagValue(ctx, BasicBlockProfilingFlag).(bool),
+ BlockLength: getFlagValue(ctx, BlockLengthFlag).(uint64),
+ CPUProfile: getFlagValue(ctx, CpuProfileFlag).(string),
+ CPUProfilePerInterval: getFlagValue(ctx, CpuProfilePerIntervalFlag).(bool),
+ Cache: getFlagValue(ctx, CacheFlag).(int),
+ CarmenSchema: getFlagValue(ctx, CarmenSchemaFlag).(int),
+ ChainID: ChainID(getFlagValue(ctx, ChainIDFlag).(int)),
+ ChannelBufferSize: getFlagValue(ctx, ChannelBufferSizeFlag).(int),
+ CompactDb: getFlagValue(ctx, CompactDbFlag).(bool),
+ ContinueOnFailure: getFlagValue(ctx, ContinueOnFailureFlag).(bool),
+ ContractNumber: getFlagValue(ctx, ContractNumberFlag).(int64),
+ DbComponent: getFlagValue(ctx, DbComponentFlag).(string),
+ DbImpl: getFlagValue(ctx, StateDbImplementationFlag).(string),
+ DbLogging: getFlagValue(ctx, StateDbLoggingFlag).(string),
+ DbTmp: getFlagValue(ctx, DbTmpFlag).(string),
+ DbVariant: getFlagValue(ctx, StateDbVariantFlag).(string),
+ Debug: getFlagValue(ctx, TraceDebugFlag).(bool),
+ DebugFrom: getFlagValue(ctx, DebugFromFlag).(int),
+ DeleteSourceDbs: getFlagValue(ctx, DeleteSourceDbsFlag).(bool),
+ DeletionDb: getFlagValue(ctx, DeletionDbFlag).(string),
+ DiagnosticServer: getFlagValue(ctx, DiagnosticServerFlag).(int64),
+ ErrorLogging: getFlagValue(ctx, ErrorLoggingFlag).(string),
+ Genesis: getFlagValue(ctx, GenesisFlag).(string),
+ IncludeStorage: getFlagValue(ctx, IncludeStorageFlag).(bool),
+ KeepDb: getFlagValue(ctx, KeepDbFlag).(bool),
+ KeysNumber: getFlagValue(ctx, KeysNumberFlag).(int64),
+ LogLevel: getFlagValue(ctx, logger.LogLevelFlag).(string),
+ MaxNumErrors: getFlagValue(ctx, MaxNumErrorsFlag).(int),
+ MaxNumTransactions: getFlagValue(ctx, MaxNumTransactionsFlag).(int),
+ MemoryBreakdown: getFlagValue(ctx, MemoryBreakdownFlag).(bool),
+ MemoryProfile: getFlagValue(ctx, MemoryProfileFlag).(string),
+ MicroProfiling: getFlagValue(ctx, MicroProfilingFlag).(bool),
+ NoHeartbeatLogging: getFlagValue(ctx, NoHeartbeatLoggingFlag).(bool),
+ NonceRange: getFlagValue(ctx, NonceRangeFlag).(int),
+ OnlySuccessful: getFlagValue(ctx, OnlySuccessfulFlag).(bool),
+ OperaBinary: getFlagValue(ctx, OperaBinaryFlag).(string),
+ OperaDb: getFlagValue(ctx, OperaDbFlag).(string),
+ Output: getFlagValue(ctx, OutputFlag).(string),
+ PrimeRandom: getFlagValue(ctx, RandomizePrimingFlag).(bool),
+ PrimeThreshold: getFlagValue(ctx, PrimeThresholdFlag).(int),
+ Profile: getFlagValue(ctx, ProfileFlag).(bool),
+ ProfileBlocks: getFlagValue(ctx, ProfileBlocksFlag).(bool),
+ ProfileDB: getFlagValue(ctx, ProfileDBFlag).(string),
+ ProfileDepth: getFlagValue(ctx, ProfileDepthFlag).(int),
+ ProfileEVMCall: getFlagValue(ctx, ProfileEVMCallFlag).(bool),
+ ProfileFile: getFlagValue(ctx, ProfileFileFlag).(string),
+ ProfileInterval: getFlagValue(ctx, ProfileIntervalFlag).(uint64),
+ ProfileSqlite3: getFlagValue(ctx, ProfileSqlite3Flag).(string),
+ ProfilingDbName: getFlagValue(ctx, ProfilingDbNameFlag).(string),
+ RandomSeed: getFlagValue(ctx, RandomSeedFlag).(int64),
+ RpcRecordingFile: getFlagValue(ctx, RpcRecordingFileFlag).(string),
+ ShadowDb: getFlagValue(ctx, ShadowDb).(bool),
+ ShadowImpl: getFlagValue(ctx, ShadowDbImplementationFlag).(string),
+ ShadowVariant: getFlagValue(ctx, ShadowDbVariantFlag).(string),
+ SkipMetadata: getFlagValue(ctx, flags.SkipMetadata).(bool),
+ SkipPriming: getFlagValue(ctx, SkipPrimingFlag).(bool),
+ SkipStateHashScrapping: getFlagValue(ctx, SkipStateHashScrappingFlag).(bool),
+ SnapshotDepth: getFlagValue(ctx, SnapshotDepthFlag).(int),
+ SourceTableName: getFlagValue(ctx, SourceTableNameFlag).(string),
+ SrcDbReadonly: false,
+ StateDbSrc: getFlagValue(ctx, StateDbSrcFlag).(string),
+ StateValidationMode: EqualityCheck,
+ StochasticSimulationFile: getFlagValue(ctx, StochasticSimulationFileFlag).(string),
+ SubstateDb: getFlagValue(ctx, substate.SubstateDbFlag).(string),
+ SyncPeriodLength: getFlagValue(ctx, SyncPeriodLengthFlag).(uint64),
+ TargetBlock: getFlagValue(ctx, TargetBlockFlag).(uint64),
+ TargetDb: getFlagValue(ctx, TargetDbFlag).(string),
+ TargetEpoch: getFlagValue(ctx, TargetEpochFlag).(uint64),
+ Trace: getFlagValue(ctx, TraceFlag).(bool),
+ TraceDirectory: getFlagValue(ctx, TraceDirectoryFlag).(string),
+ TraceFile: getFlagValue(ctx, TraceFileFlag).(string),
+ TrackProgress: getFlagValue(ctx, TrackProgressFlag).(bool),
+ TransactionLength: getFlagValue(ctx, TransactionLengthFlag).(uint64),
+ TrieRootHash: getFlagValue(ctx, TrieRootHashFlag).(string),
+ UpdateBufferSize: getFlagValue(ctx, UpdateBufferSizeFlag).(uint64),
+ UpdateDb: getFlagValue(ctx, UpdateDbFlag).(string),
+ UpdateOnFailure: getFlagValue(ctx, UpdateOnFailure).(bool),
+ UpdateType: getFlagValue(ctx, UpdateTypeFlag).(string),
+ Validate: getFlagValue(ctx, ValidateFlag).(bool),
+ ValidateStateHashes: getFlagValue(ctx, ValidateStateHashesFlag).(bool),
+ ValidateTxState: getFlagValue(ctx, ValidateTxStateFlag).(bool),
+ ValuesNumber: getFlagValue(ctx, ValuesNumberFlag).(int64),
+ VmImpl: getFlagValue(ctx, VmImplementation).(string),
+ Workers: getFlagValue(ctx, substate.WorkersFlag).(int),
+ WorldStateDb: getFlagValue(ctx, WorldStateFlag).(string),
 }
 
 return cfg
diff --git a/utils/flags.go b/utils/flags.go
index 686f8dccc..0ccf58614 100644
--- a/utils/flags.go
+++ b/utils/flags.go
@@ -154,6 +154,10 @@ var (
 Name: "db-src",
 Usage: "sets the directory contains source state DB data",
 }
+ StochasticSimulationFileFlag = cli.PathFlag{
+ Name: "simulation",
+ Usage: "sets the path to the stochastic simulation file",
+ }
 DbTmpFlag = cli.PathFlag{
 Name: "db-tmp",
 Usage: "sets the temporary directory where to place DB data; uses system default if empty",
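
Example invocation of the relocated replay subcommand (an illustrative sketch
only: the simulation file path and block numbers below are placeholders, and
the argument order assumes the first/last block arguments parsed by
utils.BlockRangeArgs):

    aida-stochastic-sdb replay --simulation ./simulation.json 1 1000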