diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim index 43bc494329..46ef2a0e24 100644 --- a/hive_integration/nodocker/engine/node.nim +++ b/hive_integration/nodocker/engine/node.nim @@ -42,8 +42,6 @@ proc processBlock( ## implementations (but can be savely removed, as well.) ## variant of `processBlock()` where the `header` argument is explicitely set. template header: Header = blk.header - var dbTx = vmState.com.db.ctx.txFrameBegin() - defer: dbTx.dispose() let com = vmState.com if com.daoForkSupport and @@ -64,7 +62,7 @@ proc processBlock( discard com.db.persistUncles(blk.uncles) # EIP-3675: no reward for miner in POA/POS - if com.proofOfStake(header): + if com.proofOfStake(header, vmState.stateDB.txFrame): vmState.calculateReward(header, blk.uncles) vmState.mutateStateDB: @@ -75,10 +73,10 @@ proc processBlock( ok() -proc getVmState(c: ChainRef, header: Header): +proc getVmState(c: ChainRef, header: Header, txFrame: CoreDbTxRef): Result[BaseVMState, void] = let vmState = BaseVMState() - if not vmState.init(header, c.com, storeSlotHash = false): + if not vmState.init(header, c.com, txFrame, storeSlotHash = false): debug "Cannot initialise VmState", number = header.number return err() @@ -94,17 +92,17 @@ proc setBlock*(c: ChainRef; blk: Block): Result[void, string] = # Needed for figuring out whether KVT cleanup is due (see at the end) let - vmState = c.getVmState(header).valueOr: + vmState = c.getVmState(header, txFrame).valueOr: return err("no vmstate") ? vmState.processBlock(blk) - ? c.db.persistHeaderAndSetHead(header, c.com.startOfHistory) + ? txFrame.persistHeaderAndSetHead(header, c.com.startOfHistory) - c.db.persistTransactions(header.number, header.txRoot, blk.transactions) - c.db.persistReceipts(header.receiptsRoot, vmState.receipts) + txFrame.persistTransactions(header.number, header.txRoot, blk.transactions) + txFrame.persistReceipts(header.receiptsRoot, vmState.receipts) if blk.withdrawals.isSome: - c.db.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get) + txFrame.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get) # update currentBlock *after* we persist it # so the rpc return consistent result diff --git a/nimbus/beacon/api_handler/api_exchangeconf.nim b/nimbus/beacon/api_handler/api_exchangeconf.nim index 0b54bc50ab..9b1d871e8e 100644 --- a/nimbus/beacon/api_handler/api_exchangeconf.nim +++ b/nimbus/beacon/api_handler/api_exchangeconf.nim @@ -43,7 +43,7 @@ proc exchangeConf*(ben: BeaconEngineRef, terminalBlockHash = conf.terminalBlockHash if terminalBlockHash != default(Hash32): - let headerHash = db.getBlockHash(terminalBlockNumber).valueOr: + let headerHash = db.baseTxFrame().getBlockHash(terminalBlockNumber).valueOr: raise newException(ValueError, "cannot get terminal block hash, number $1, msg: $2" % [$terminalBlockNumber, error]) @@ -51,7 +51,7 @@ proc exchangeConf*(ben: BeaconEngineRef, raise newException(ValueError, "invalid terminal block hash, got $1 want $2" % [$terminalBlockHash, $headerHash]) - let header = db.getBlockHeader(headerHash).valueOr: + let header = db.baseTxFrame().getBlockHeader(headerHash).valueOr: raise newException(ValueError, "cannot get terminal block header, hash $1, msg: $2" % [$terminalBlockHash, error]) diff --git a/nimbus/beacon/api_handler/api_forkchoice.nim b/nimbus/beacon/api_handler/api_forkchoice.nim index d88ad4c950..1712f12665 100644 --- a/nimbus/beacon/api_handler/api_forkchoice.nim +++ b/nimbus/beacon/api_handler/api_forkchoice.nim @@ -76,7 +76,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, ForkchoiceUpdatedResponse = let com = ben.com - db = com.db + db = com.db.baseTxFrame() # TODO forkedChain! chain = ben.chain blockHash = update.headBlockHash @@ -125,6 +125,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, let blockNumber = header.number if header.difficulty > 0.u256 or blockNumber == 0'u64: let + # TODO this chould be forkedchain! td = db.getScore(blockHash) ptd = db.getScore(header.parentHash) ttd = com.ttd.get(high(UInt256)) diff --git a/nimbus/beacon/api_handler/api_newpayload.nim b/nimbus/beacon/api_handler/api_newpayload.nim index e06e73db3c..4ebd42238e 100644 --- a/nimbus/beacon/api_handler/api_newpayload.nim +++ b/nimbus/beacon/api_handler/api_newpayload.nim @@ -106,7 +106,7 @@ proc newPayload*(ben: BeaconEngineRef, let com = ben.com - db = com.db + db = com.db.baseTxFrame() # TODO this should be forkedchain! timestamp = ethTime payload.timestamp version = payload.version requestsHash = calcRequestsHash(executionRequests) @@ -185,7 +185,7 @@ proc newPayload*(ben: BeaconEngineRef, warn "State not available, ignoring new payload", hash = blockHash, number = header.number - let blockHash = latestValidHash(db, parent, ttd) + let blockHash = latestValidHash(com.db, parent, ttd) return acceptedStatus(blockHash) trace "Inserting block without sethead", @@ -195,10 +195,10 @@ proc newPayload*(ben: BeaconEngineRef, warn "Error importing block", number = header.number, hash = blockHash.short, - parent = header.parentHash.short, + parent = header.parentHash.short, error = vres.error() ben.setInvalidAncestor(header, blockHash) - let blockHash = latestValidHash(db, parent, ttd) + let blockHash = latestValidHash(com.db, parent, ttd) return invalidStatus(blockHash, vres.error()) info "New payload received and validated", diff --git a/nimbus/beacon/api_handler/api_utils.nim b/nimbus/beacon/api_handler/api_utils.nim index 5c1d2cef66..06d5808e13 100644 --- a/nimbus/beacon/api_handler/api_utils.nim +++ b/nimbus/beacon/api_handler/api_utils.nim @@ -177,7 +177,8 @@ proc latestValidHash*(db: CoreDbRef, ttd: DifficultyInt): Hash32 = if parent.isGenesis: return default(Hash32) - let ptd = db.getScore(parent.parentHash).valueOr(0.u256) + # TODO shouldn't this be in forkedchainref? + let ptd = db.baseTxFrame().getScore(parent.parentHash).valueOr(0.u256) if ptd >= ttd: parent.blockHash else: diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index 965e83dfc9..0902cd9559 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -125,10 +125,10 @@ func daoCheck(conf: ChainConfig) = conf.daoForkBlock = conf.homesteadBlock proc initializeDb(com: CommonRef) = - let kvt = com.db.ctx.getKvt() - proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool = - kvt.hasKeyRc(key).expect "valid bool" - if canonicalHeadHashKey().toOpenArray notin kvt: + let txFrame = com.db.baseTxFrame() + proc contains(txFrame: CoreDbTxRef; key: openArray[byte]): bool = + txFrame.hasKeyRc(key).expect "valid bool" + if canonicalHeadHashKey().toOpenArray notin txFrame: info "Writing genesis to DB", blockHash = com.genesisHeader.rlpHash, stateRoot = com.genesisHeader.stateRoot, @@ -138,23 +138,23 @@ proc initializeDb(com: CommonRef) = nonce = com.genesisHeader.nonce doAssert(com.genesisHeader.number == 0.BlockNumber, "can't commit genesis block with number > 0") - com.db.persistHeaderAndSetHead(com.genesisHeader, + txFrame.persistHeaderAndSetHead(com.genesisHeader, startOfHistory=com.genesisHeader.parentHash). expect("can persist genesis header") - doAssert(canonicalHeadHashKey().toOpenArray in kvt) + doAssert(canonicalHeadHashKey().toOpenArray in txFrame) # The database must at least contain the base and head pointers - the base # is implicitly considered finalized let - baseNum = com.db.getSavedStateBlockNumber() - base = com.db.getBlockHeader(baseNum).valueOr: + baseNum = txFrame.getSavedStateBlockNumber() + base = txFrame.getBlockHeader(baseNum).valueOr: fatal "Cannot load base block header", baseNum, err = error quit 1 - finalized = com.db.finalizedHeader().valueOr: + finalized = txFrame.finalizedHeader().valueOr: debug "No finalized block stored in database, reverting to base" base - head = com.db.getCanonicalHead().valueOr: + head = txFrame.getCanonicalHead().valueOr: fatal "Cannot load canonical block header", err = error quit 1 @@ -196,10 +196,12 @@ proc init(com : CommonRef, time: Opt.some(genesis.timestamp) ) fork = toHardFork(com.forkTransitionTable, forkDeterminer) + txFrame = db.baseTxFrame() # Must not overwrite the global state on the single state DB - com.genesisHeader = db.getBlockHeader(0.BlockNumber).valueOr: - toGenesisHeader(genesis, fork, com.db) + + com.genesisHeader = txFrame.getBlockHeader(0.BlockNumber).valueOr: + toGenesisHeader(genesis, fork, txFrame) com.setForkId(com.genesisHeader) com.pos.timestamp = genesis.timestamp @@ -209,13 +211,13 @@ proc init(com : CommonRef, com.initializeDb() -proc isBlockAfterTtd(com: CommonRef, header: Header): bool = +proc isBlockAfterTtd(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool = if com.config.terminalTotalDifficulty.isNone: return false let ttd = com.config.terminalTotalDifficulty.get() - ptd = com.db.getScore(header.parentHash).valueOr: + ptd = txFrame.getScore(header.parentHash).valueOr: return false td = ptd + header.difficulty ptd >= ttd and td >= ttd @@ -325,7 +327,7 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool = func isPragueOrLater*(com: CommonRef, t: EthTime): bool = com.config.pragueTime.isSome and t >= com.config.pragueTime.get -proc proofOfStake*(com: CommonRef, header: Header): bool = +proc proofOfStake*(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool = if com.config.posBlock.isSome: # see comments of posBlock in common/hardforks.nim header.number >= com.config.posBlock.get @@ -333,7 +335,7 @@ proc proofOfStake*(com: CommonRef, header: Header): bool = header.number >= com.config.mergeNetsplitBlock.get else: # This costly check is only executed from test suite - com.isBlockAfterTtd(header) + com.isBlockAfterTtd(header, txFrame) func depositContractAddress*(com: CommonRef): Address = com.config.depositContractAddress.get(default(Address)) diff --git a/nimbus/common/genesis.nim b/nimbus/common/genesis.nim index c551359226..834694b4a8 100644 --- a/nimbus/common/genesis.nim +++ b/nimbus/common/genesis.nim @@ -24,7 +24,7 @@ import proc toGenesisHeader*( g: Genesis; - db: CoreDbRef; + db: CoreDbTxRef; fork: HardFork; ): Header = ## Initialise block chain DB accounts derived from the `genesis.alloc` table @@ -81,16 +81,16 @@ proc toGenesisHeader*( proc toGenesisHeader*( genesis: Genesis; fork: HardFork; - db = CoreDbRef(nil)): Header = + db = CoreDbTxRef(nil)): Header = ## Generate the genesis block header from the `genesis` and `config` ## argument value. let - db = if db.isNil: AristoDbMemory.newCoreDbRef() else: db + db = if db.isNil: AristoDbMemory.newCoreDbRef().ctx.txFrameBegin(nil) else: db toGenesisHeader(genesis, db, fork) proc toGenesisHeader*( params: NetworkParams; - db = CoreDbRef(nil) + db = CoreDbTxRef(nil) ): Header = ## Generate the genesis block header from the `genesis` and `config` ## argument value. diff --git a/nimbus/core/chain/forked_chain.nim b/nimbus/core/chain/forked_chain.nim index bf0b9588c6..4fb342ae58 100644 --- a/nimbus/core/chain/forked_chain.nim +++ b/nimbus/core/chain/forked_chain.nim @@ -41,7 +41,7 @@ template shouldNotKeyError(info: string, body: untyped) = except KeyError as exc: raiseAssert info & ": name=" & $exc.name & " msg=" & exc.msg -proc deleteLineage(c: ForkedChainRef; top: Hash32) = +proc deleteLineage(c: ForkedChainRef; top: Hash32, commit: bool = false) = ## Starting at argument `top`, delete all entries from `c.blocks[]` along ## the ancestor chain. ## @@ -50,6 +50,12 @@ proc deleteLineage(c: ForkedChainRef; top: Hash32) = c.blocks.withValue(parent, val): let w = parent parent = val.blk.header.parentHash + # TODO when committing, blocks that refer to the committed frame need to + # update their parent field / get a new frame .. + if commit: + val[].txFrame.commit() + else: + val[].txFrame.dispose() c.blocks.del(w) continue break @@ -60,15 +66,16 @@ proc deleteLineage(c: ForkedChainRef; top: Hash32) = proc processBlock(c: ForkedChainRef, parent: Header, + txFrame: CoreDbTxRef, blk: Block): Result[seq[Receipt], string] = template header(): Header = blk.header let vmState = BaseVMState() - vmState.init(parent, header, c.com) + vmState.init(parent, header, c.com, txFrame) if c.extraValidation: - ?c.com.validateHeaderAndKinship(blk, vmState.parent) + ?c.com.validateHeaderAndKinship(blk, vmState.parent, txFrame) ?vmState.processBlock( blk, @@ -80,7 +87,7 @@ proc processBlock(c: ForkedChainRef, # We still need to write header to database # because validateUncles still need it let blockHash = header.blockHash() - ?c.db.persistHeader( + ?txFrame.persistHeader( blockHash, header, c.com.startOfHistory) @@ -120,6 +127,7 @@ func updateCursorHeads(c: ForkedChainRef, func updateCursor(c: ForkedChainRef, blk: Block, + txFrame: CoreDbTxRef, receipts: sink seq[Receipt]) = template header(): Header = blk.header @@ -134,25 +142,22 @@ func updateCursor(c: ForkedChainRef, # New block => update head c.blocks[c.cursorHash] = BlockDesc( blk: blk, + txFrame: txFrame, receipts: move(receipts)) c.updateCursorHeads(c.cursorHash, header) proc validateBlock(c: ForkedChainRef, parent: Header, - blk: Block, - updateCursor: bool = true): Result[void, string] = - let dbTx = c.db.ctx.txFrameBegin() - defer: - dbTx.dispose() + parentFrame: CoreDbTxRef, + blk: Block): Result[void, string] = + let txFrame = parentFrame.ctx.txFrameBegin(parentFrame) - var res = c.processBlock(parent, blk) + var res = c.processBlock(parent, txFrame, blk) if res.isErr: - dbTx.rollback() + txFrame.rollback() return err(res.error) - dbTx.commit() - if updateCursor: - c.updateCursor(blk, move(res.value)) + c.updateCursor(blk, txFrame, move(res.value)) let blkHash = blk.header.blockHash for i, tx in blk.transactions: @@ -160,48 +165,6 @@ proc validateBlock(c: ForkedChainRef, ok() -proc replaySegment*(c: ForkedChainRef, target: Hash32) = - # Replay from base+1 to target block - var - prevHash = target - chain = newSeq[Block]() - - shouldNotKeyError "replaySegment(target)": - while prevHash != c.baseHash: - chain.add c.blocks[prevHash].blk - prevHash = chain[^1].header.parentHash - - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - c.cursorHeader = c.baseHeader - for i in countdown(chain.high, chain.low): - c.validateBlock(c.cursorHeader, chain[i], - updateCursor = false).expect("have been validated before") - c.cursorHeader = chain[i].header - c.cursorHash = target - -proc replaySegment(c: ForkedChainRef, - target: Hash32, - parent: Header, - parentHash: Hash32) = - # Replay from parent+1 to target block - # with assumption last state is at parent - var - prevHash = target - chain = newSeq[Block]() - - shouldNotKeyError "replaySegment(target,parent)": - while prevHash != parentHash: - chain.add c.blocks[prevHash].blk - prevHash = chain[^1].header.parentHash - - c.cursorHeader = parent - for i in countdown(chain.high, chain.low): - c.validateBlock(c.cursorHeader, chain[i], - updateCursor = false).expect("have been validated before") - c.cursorHeader = chain[i].header - c.cursorHash = target - proc writeBaggage(c: ForkedChainRef, target: Hash32) = # Write baggage from base+1 to target block template header(): Header = @@ -211,12 +174,14 @@ proc writeBaggage(c: ForkedChainRef, target: Hash32) = var prevHash = target var count = 0'u64 while prevHash != c.baseHash: - let blk = c.blocks[prevHash] - c.db.persistTransactions(header.number, header.txRoot, blk.blk.transactions) - c.db.persistReceipts(header.receiptsRoot, blk.receipts) - discard c.db.persistUncles(blk.blk.uncles) + let blk = c.blocks[prevHash] + # TODO this is a bit late to be writing the transactions etc into the frame + # since there are probably frames built on top already ... + blk.txFrame.persistTransactions(header.number, header.txRoot, blk.blk.transactions) + blk.txFrame.persistReceipts(header.receiptsRoot, blk.receipts) + discard blk.txFrame.persistUncles(blk.blk.uncles) if blk.blk.withdrawals.isSome: - c.db.persistWithdrawals( + blk.txFrame.persistWithdrawals( header.withdrawalsRoot.expect("WithdrawalsRoot should be verified before"), blk.blk.withdrawals.get) for tx in blk.blk.transactions: @@ -230,7 +195,7 @@ proc writeBaggage(c: ForkedChainRef, target: Hash32) = baseNumber = c.baseHeader.number, baseHash = c.baseHash.short -func updateBase(c: ForkedChainRef, pvarc: PivotArc) = +proc updateBase(c: ForkedChainRef, pvarc: PivotArc) = ## Remove obsolete chains, example: ## ## A1 - A2 - A3 D5 - D6 @@ -264,7 +229,7 @@ func updateBase(c: ForkedChainRef, pvarc: PivotArc) = # Cleanup in-memory blocks starting from newBase backward # while blocks from newBase+1 to canonicalCursor not deleted # e.g. B4 onward - c.deleteLineage pvarc.pvHash + c.deleteLineage(pvarc.pvHash, true) # Implied deletion of chain heads (if any) c.cursorHeads.swap newCursorHeads @@ -423,7 +388,7 @@ proc setHead(c: ForkedChainRef, pvarc: PivotArc) = # TODO: db.setHead should not read from db anymore # all canonical chain marking # should be done from here. - discard c.db.setHead(pvarc.pvHash) + # discard c.db.setHead(pvarc.pvHash) # update global syncHighest c.com.syncHighest = pvarc.pvNumber @@ -431,21 +396,12 @@ proc setHead(c: ForkedChainRef, pvarc: PivotArc) = proc updateHeadIfNecessary(c: ForkedChainRef, pvarc: PivotArc) = # update head if the new head is different # from current head or current chain - if c.cursorHash != pvarc.cursor.hash: - if not c.stagingTx.isNil: - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - c.replaySegment(pvarc.pvHash) c.trimCursorArc(pvarc) if c.cursorHash != pvarc.pvHash: c.cursorHeader = pvarc.pvHeader c.cursorHash = pvarc.pvHash - if c.stagingTx.isNil: - # setHead below don't go straight to db - c.stagingTx = c.db.ctx.txFrameBegin() - c.setHead(pvarc) # ------------------------------------------------------------------------------ @@ -471,16 +427,17 @@ proc init*( ## `persistentBlocks()` used for `Era1` or `Era` import. ## let - base = com.db.getSavedStateBlockNumber - baseHash = com.db.getBlockHash(base).expect("baseHash exists") - baseHeader = com.db.getBlockHeader(baseHash).expect("base header exists") + baseTxFrame = com.db.baseTxFrame() + base = baseTxFrame.getSavedStateBlockNumber + baseHash = baseTxFrame.getBlockHash(base).expect("baseHash exists") + baseHeader = baseTxFrame.getBlockHeader(baseHash).expect("base header exists") # update global syncStart com.syncStart = baseHeader.number T(com: com, - db: com.db, baseHeader: baseHeader, + baseTxFrame: baseTxFrame, cursorHash: baseHash, baseHash: baseHash, cursorHeader: baseHeader, @@ -494,10 +451,12 @@ proc newForkedChain*(com: CommonRef, ## This constructor allows to set up the base state which might be needed ## for some particular test or other applications. Otherwise consider ## `init()`. - let baseHash = baseHeader.blockHash + let + baseHash = baseHeader.blockHash + baseTxFrame = com.db.baseTxFrame() let chain = ForkedChainRef( com: com, - db : com.db, + baseTxFrame : baseTxFrame, baseHeader : baseHeader, cursorHash : baseHash, baseHash : baseHash, @@ -512,21 +471,21 @@ proc newForkedChain*(com: CommonRef, proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] = # Try to import block to canonical or side chain. # return error if the block is invalid - if c.stagingTx.isNil: - c.stagingTx = c.db.ctx.txFrameBegin() - template header(): Header = blk.header - if header.parentHash == c.cursorHash: - return c.validateBlock(c.cursorHeader, blk) - if header.parentHash == c.baseHash: - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - return c.validateBlock(c.baseHeader, blk) + return c.validateBlock(c.baseHeader, c.baseTxFrame, blk) + + c.blocks.withValue(header.parentHash, bd) do: + # TODO: If engine API keep importing blocks + # but not finalized it, e.g. current chain length > StagedBlocksThreshold + # We need to persist some of the in-memory stuff + # to a "staging area" or disk-backed memory but it must not afect `base`. + # `base` is the point of no return, we only update it on finality. - if header.parentHash notin c.blocks: + ? c.validateBlock(bd.blk.header, bd.txFrame, blk) + do: # If it's parent is an invalid block # there is no hope the descendant is valid debug "Parent block not found", @@ -534,14 +493,7 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] = parentHash = header.parentHash.short return err("Block is not part of valid chain") - # TODO: If engine API keep importing blocks - # but not finalized it, e.g. current chain length > StagedBlocksThreshold - # We need to persist some of the in-memory stuff - # to a "staging area" or disk-backed memory but it must not afect `base`. - # `base` is the point of no return, we only update it on finality. - - c.replaySegment(header.parentHash) - c.validateBlock(c.cursorHeader, blk) + ok() proc forkChoice*(c: ForkedChainRef, headHash: Hash32, @@ -573,25 +525,15 @@ proc forkChoice*(c: ForkedChainRef, # At this point cursorHeader.number > baseHeader.number if newBase.pvHash == c.cursorHash: - # Paranoid check, guaranteed by `newBase.hash == c.cursorHash` - doAssert(not c.stagingTx.isNil) - - # CL decide to move backward and then forward? - if c.cursorHeader.number < pvarc.pvNumber: - c.replaySegment(pvarc.pvHash, c.cursorHeader, c.cursorHash) - # Current segment is canonical chain c.writeBaggage(newBase.pvHash) c.setHead(pvarc) - c.stagingTx.commit() - c.stagingTx = nil - # Move base to newBase c.updateBase(newBase) # Save and record the block number before the last saved block state. - c.db.persistent(newBase.pvNumber).isOkOr: + c.com.db.persistent(newBase.pvNumber).isOkOr: return err("Failed to save state: " & $$error) return ok() @@ -602,28 +544,13 @@ proc forkChoice*(c: ForkedChainRef, doAssert(newBase.pvNumber <= finalizedHeader.number) # Write segment from base+1 to newBase into database - c.stagingTx.rollback() - c.stagingTx = c.db.ctx.txFrameBegin() - if newBase.pvNumber > c.baseHeader.number: - c.replaySegment(newBase.pvHash) c.writeBaggage(newBase.pvHash) - c.stagingTx.commit() - c.stagingTx = nil # Update base forward to newBase c.updateBase(newBase) - c.db.persistent(newBase.pvNumber).isOkOr: + c.com.db.persistent(newBase.pvNumber).isOkOr: return err("Failed to save state: " & $$error) - if c.stagingTx.isNil: - # replaySegment or setHead below don't - # go straight to db - c.stagingTx = c.db.ctx.txFrameBegin() - - # Move chain state forward to current head - if newBase.pvNumber < pvarc.pvNumber: - c.replaySegment(pvarc.pvHash) - c.setHead(pvarc) # Move cursor to current head @@ -646,11 +573,19 @@ proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash32): bool = return true if c.baseHash == blockHash: return true - c.db.headerExists(blockHash) + c.baseTxFrame.headerExists(blockHash) + +func txFrame*(c: ForkedChainRef, blockHash: Hash32): CoreDbTxRef = + if blockHash == c.baseHash: + return c.baseTxFrame + + c.blocks.withValue(blockHash, bd) do: + return bd[].txFrame + + c.baseTxFrame -func stateReady*(c: ForkedChainRef, header: Header): bool = - let blockHash = header.blockHash - blockHash == c.cursorHash +func txFrame*(c: ForkedChainRef, header: Header): CoreDbTxRef = + c.txFrame(header.blockHash()) func com*(c: ForkedChainRef): CommonRef = c.com @@ -691,7 +626,7 @@ func memoryTransaction*(c: ForkedChainRef, txHash: Hash32): Opt[Transaction] = proc latestBlock*(c: ForkedChainRef): Block = c.blocks.withValue(c.cursorHash, val) do: return val.blk - c.db.getEthBlock(c.cursorHash).expect("cursorBlock exists") + c.baseTxFrame.getEthBlock(c.cursorHash).expect("cursorBlock exists") proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Header, string] = if number > c.cursorHeader.number: @@ -704,7 +639,7 @@ proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Header, str return ok(c.baseHeader) if number < c.baseHeader.number: - return c.db.getBlockHeader(number) + return c.baseTxFrame.getBlockHeader(number) shouldNotKeyError "headerByNumber": var prevHash = c.cursorHeader.parentHash @@ -722,7 +657,7 @@ proc headerByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Header, string] do: if c.baseHash == blockHash: return ok(c.baseHeader) - return c.db.getBlockHeader(blockHash) + return c.baseTxFrame.getBlockHeader(blockHash) proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Block, string] = # used by getPayloadBodiesByHash @@ -731,14 +666,14 @@ proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Block, string] = c.blocks.withValue(blockHash, val) do: return ok(val.blk) do: - return c.db.getEthBlock(blockHash) + return c.baseTxFrame.getEthBlock(blockHash) proc blockByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Block, string] = if number > c.cursorHeader.number: return err("Requested block number not exists: " & $number) if number < c.baseHeader.number: - return c.db.getEthBlock(number) + return c.baseTxFrame.getEthBlock(number) shouldNotKeyError "blockByNumber": var prevHash = c.cursorHash @@ -793,6 +728,6 @@ proc isCanonicalAncestor*(c: ForkedChainRef, # canonical chain in database should have a marker # and the marker is block number - let canonHash = c.db.getBlockHash(blockNumber).valueOr: + let canonHash = c.baseTxFrame.getBlockHash(blockNumber).valueOr: return false canonHash == blockHash diff --git a/nimbus/core/chain/forked_chain/chain_desc.nim b/nimbus/core/chain/forked_chain/chain_desc.nim index 3689fce401..2c27b2ab4c 100644 --- a/nimbus/core/chain/forked_chain/chain_desc.nim +++ b/nimbus/core/chain/forked_chain/chain_desc.nim @@ -22,6 +22,7 @@ type BlockDesc* = object blk*: Block + txFrame*: CoreDbTxRef receipts*: seq[Receipt] PivotArc* = object @@ -30,13 +31,15 @@ type cursor*: CursorDesc ## Cursor arc containing `pv` item ForkedChainRef* = ref object - stagingTx*: CoreDbTxRef - db*: CoreDbRef com*: CommonRef blocks*: Table[Hash32, BlockDesc] txRecords: Table[Hash32, (Hash32, uint64)] baseHash*: Hash32 baseHeader*: Header + baseTxFrame*: CoreDbTxRef + # Frame that skips all in-memory state that ForkecChain holds - used to + # lookup items straight from the database + cursorHash*: Hash32 cursorHeader*: Header cursorHeads*: seq[CursorDesc] diff --git a/nimbus/core/chain/forked_chain/chain_kvt.nim b/nimbus/core/chain/forked_chain/chain_kvt.nim index 07c4e2fa4f..adc139d887 100644 --- a/nimbus/core/chain/forked_chain/chain_kvt.nim +++ b/nimbus/core/chain/forked_chain/chain_kvt.nim @@ -26,47 +26,51 @@ import proc fcKvtAvailable*(c: ForkedChainRef): bool = ## Returns `true` if `kvt` data can be saved persistently. - c.db.txFrameLevel() == 0 + false # TODO c.db.txFrameLevel() == 0 proc fcKvtPersistent*(c: ForkedChainRef): bool = ## Save cached `kvt` data if possible. This function has the side effect ## that it saves all cached db data including `Aristo` data (although there ## should not be any.) ## - if c.fcKvtAvailable(): - c.db.persistent(c.db.getSavedStateBlockNumber()).isOkOr: - raiseAssert "fcKvtPersistent: persistent() failed: " & $$error - return true + # if c.fcKvtAvailable(): + # c.db.persistent(c.db.getSavedStateBlockNumber()).isOkOr: + # raiseAssert "fcKvtPersistent: persistent() failed: " & $$error + # return true + discard # TODO proc fcKvtHasKey*(c: ForkedChainRef, key: openArray[byte]): bool = ## Check whether the argument `key` exists on the `kvt` table (i.e. `get()` ## would succeed.) ## - c.db.ctx.getKvt().hasKey(key) + # c.db.ctx.getKvt().hasKey(key) + discard # TODO proc fcKvtGet*(c: ForkedChainRef, key: openArray[byte]): Opt[seq[byte]] = ## Fetch data entry from `kvt` table. ## - var w = c.db.ctx.getKvt().get(key).valueOr: - return err() - ok(move w) + # var w = c.db.ctx.getKvt().get(key).valueOr: + # return err() + # ok(move w) + discard # TODO proc fcKvtPut*(c: ForkedChainRef, key, data: openArray[byte]): bool = ## Cache data on the `kvt` table marked for saving persistently. If the `kvt` ## table is unavailable, this function does nothing and returns `false`. ## - if c.fcKvtAvailable(): - c.db.ctx.getKvt().put(key, data).isOkOr: - raiseAssert "fcKvtPut: put() failed: " & $$error - return true + # if c.fcKvtAvailable(): + # c.db.ctx.getKvt().put(key, data).isOkOr: + # raiseAssert "fcKvtPut: put() failed: " & $$error + # return true + discard # TODO proc fcKvtDel*(c: ForkedChainRef, key: openArray[byte]): bool = ## Cache key for deletion on the `kvt` table. If the `kvt` table is ## unavailable, this function does nothing and returns `false`. ## - if c.fcKvtAvailable(): - c.db.ctx.getKvt().del(key).isOkOr: - raiseAssert "fcKvtDel: del() failed: " & $$error - return true - + # if c.fcKvtAvailable(): + # c.db.ctx.getKvt().del(key).isOkOr: + # raiseAssert "fcKvtDel: del() failed: " & $$error + # return true + discard # TODO # End diff --git a/nimbus/core/chain/persist_blocks.nim b/nimbus/core/chain/persist_blocks.nim index 8d1d231140..ced1108960 100644 --- a/nimbus/core/chain/persist_blocks.nim +++ b/nimbus/core/chain/persist_blocks.nim @@ -46,7 +46,6 @@ type flags: PersistBlockFlags vmState: BaseVMState - dbTx: CoreDbTxRef stats*: PersistStats parent: Header @@ -64,29 +63,29 @@ proc getVmState( ): Result[BaseVMState, string] = if p.vmState == nil: let vmState = BaseVMState() - if not vmState.init(header, p.c.com, storeSlotHash = storeSlotHash): + if not vmState.init( + header, p.c.com, p.c.db.baseTxFrame(), storeSlotHash = storeSlotHash + ): return err("Could not initialise VMState") p.vmState = vmState else: if header.number != p.parent.number + 1: return err("Only linear histories supported by Persister") - if not p.vmState.reinit(p.parent, header, linear = true): + if not p.vmState.reinit(p.parent, header): return err("Could not update VMState for new block") ok(p.vmState) proc dispose*(p: var Persister) = - if p.dbTx != nil: - p.dbTx.dispose() - p.dbTx = nil + p.c.db.baseTxFrame().rollback() proc init*(T: type Persister, c: ChainRef, flags: PersistBlockFlags): T = T(c: c, flags: flags) proc checkpoint*(p: var Persister): Result[void, string] = if NoValidation notin p.flags: - let stateRoot = p.c.db.ctx.getAccounts().getStateRoot().valueOr: + let stateRoot = p.c.db.baseTxFrame().getStateRoot().valueOr: return err($$error) if p.parent.stateRoot != stateRoot: @@ -101,10 +100,6 @@ proc checkpoint*(p: var Persister): Result[void, string] = "stateRoot mismatch, expect: " & $p.parent.stateRoot & ", got: " & $stateRoot ) - if p.dbTx != nil: - p.dbTx.commit() - p.dbTx = nil - # Save and record the block number before the last saved block state. p.c.db.persistent(p.parent.number).isOkOr: return err("Failed to save state: " & $$error) @@ -117,9 +112,6 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] = let c = p.c - if p.dbTx == nil: - p.dbTx = p.c.db.ctx.txFrameBegin() - # Full validation means validating the state root at every block and # performing the more expensive hash computations on the block itself, ie # verifying that the transaction and receipts roots are valid - when not @@ -139,6 +131,7 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] = let skipValidation = NoValidation in p.flags vmState = ?p.getVmState(header, storeSlotHash = NoPersistSlotHashes notin p.flags) + txFrame = vmState.stateDB.txFrame # TODO even if we're skipping validation, we should perform basic sanity # checks on the block and header - that fields are sanely set for the @@ -146,7 +139,7 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] = # sanity checks should be performed early in the processing pipeline no # matter their provenance. if not skipValidation: - ?c.com.validateHeaderAndKinship(blk, vmState.parent) + ?c.com.validateHeaderAndKinship(blk, vmState.parent, txFrame) # Generate receipts for storage or validation but skip them otherwise ?vmState.processBlock( @@ -159,16 +152,16 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] = if NoPersistHeader notin p.flags: let blockHash = header.blockHash() - ?c.db.persistHeaderAndSetHead(blockHash, header, c.com.startOfHistory) + ?txFrame.persistHeaderAndSetHead(blockHash, header, c.com.startOfHistory) if NoPersistTransactions notin p.flags: - c.db.persistTransactions(header.number, header.txRoot, blk.transactions) + txFrame.persistTransactions(header.number, header.txRoot, blk.transactions) if NoPersistReceipts notin p.flags: - c.db.persistReceipts(header.receiptsRoot, vmState.receipts) + txFrame.persistReceipts(header.receiptsRoot, vmState.receipts) if NoPersistWithdrawals notin p.flags and blk.withdrawals.isSome: - c.db.persistWithdrawals( + txFrame.persistWithdrawals( header.withdrawalsRoot.expect("WithdrawalsRoot should be verified before"), blk.withdrawals.get, ) diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim index 262b40a159..141bd8f227 100644 --- a/nimbus/core/executor/process_block.nim +++ b/nimbus/core/executor/process_block.nim @@ -173,7 +173,7 @@ proc procBlkPreamble( # TODO It's strange that we persist uncles before processing block but the # rest after... if not skipUncles: - let h = vmState.com.db.persistUncles(blk.uncles) + let h = vmState.stateDB.txFrame.persistUncles(blk.uncles) if h != header.ommersHash: return err("ommersHash mismatch") elif not skipValidation and rlpHash(blk.uncles) != header.ommersHash: @@ -277,7 +277,7 @@ proc processBlock*( ?vmState.procBlkPreamble(blk, skipValidation, skipReceipts, skipUncles, taskpool) # EIP-3675: no reward for miner in POA/POS - if not vmState.com.proofOfStake(blk.header): + if not vmState.com.proofOfStake(blk.header, vmState.stateDB.txFrame): vmState.calculateReward(blk.header, blk.uncles) ?vmState.procBlkEpilogue(blk, skipValidation, skipReceipts) diff --git a/nimbus/core/tx_pool/tx_desc.nim b/nimbus/core/tx_pool/tx_desc.nim index 501d09a91d..71da57193d 100644 --- a/nimbus/core/tx_pool/tx_desc.nim +++ b/nimbus/core/tx_pool/tx_desc.nim @@ -110,7 +110,7 @@ proc gasLimitsGet(com: CommonRef; parent: Header): GasInt = gasFloor = com.gasLimit, gasCeil = com.gasLimit) -proc setupVMState(com: CommonRef; parent: Header): BaseVMState = +proc setupVMState(com: CommonRef; parent: Header, parentFrame: CoreDbTxRef): BaseVMState = # do hardfork transition before # BaseVMState querying any hardfork/consensus from CommonRef @@ -130,10 +130,12 @@ proc setupVMState(com: CommonRef; parent: Header): BaseVMState = BaseVMState.new( parent = parent, blockCtx = blockCtx, - com = com) + com = com, + txFrame = com.db.ctx.txFrameBegin(parentFrame) + ) proc update(xp: TxPoolRef; parent: Header) = - xp.vmState = setupVMState(xp.vmState.com, parent) + xp.vmState = setupVMState(xp.vmState.com, parent, xp.chain.txFrame(parent)) # ------------------------------------------------------------------------------ # Public functions, constructor @@ -144,7 +146,7 @@ proc init*(xp: TxPoolRef; chain: ForkedChainRef) = xp.startDate = getTime().utc.toTime let head = chain.latestHeader - xp.vmState = setupVMState(chain.com, head) + xp.vmState = setupVMState(chain.com, head, chain.txFrame(head)) xp.txDB = TxTabsRef.new xp.lifeTime = txItemLifeTime diff --git a/nimbus/core/tx_pool/tx_packer.nim b/nimbus/core/tx_pool/tx_packer.nim index 46ab9ce5bc..321e1fdc1d 100644 --- a/nimbus/core/tx_pool/tx_packer.nim +++ b/nimbus/core/tx_pool/tx_packer.nim @@ -296,7 +296,7 @@ proc packerVmExec*(xp: TxPoolRef): Result[TxPacker, string] ## Rebuild `packed` bucket by selection items from the `staged` bucket ## after executing them in the VM. let db = xp.vmState.com.db - let dbTx = db.ctx.txFrameBegin() + let dbTx = db.ctx.txFrameBegin(nil) # TODO use the correct parent frame here! defer: dbTx.dispose() var pst = xp.vmExecInit.valueOr: diff --git a/nimbus/core/validate.nim b/nimbus/core/validate.nim index 32e485ec5c..eae40c565f 100644 --- a/nimbus/core/validate.nim +++ b/nimbus/core/validate.nim @@ -41,6 +41,7 @@ proc validateHeader( com: CommonRef; blk: Block; parentHeader: Header; + txFrame: CoreDbTxRef; ): Result[void,string] = template header: Header = blk.header # TODO this code is used for validating uncles also, though these get passed @@ -76,7 +77,7 @@ proc validateHeader( if header.extraData != daoForkBlockExtraData: return err("header extra data should be marked DAO") - if com.proofOfStake(header): + if com.proofOfStake(header, txFrame): # EIP-4399 and EIP-3675 # no need to check mixHash because EIP-4399 override this field # checking rule @@ -100,7 +101,7 @@ proc validateHeader( ok() -proc validateUncles(com: CommonRef; header: Header; +proc validateUncles(com: CommonRef; header: Header; txFrame: CoreDbTxRef, uncles: openArray[Header]): Result[void,string] {.gcsafe, raises: [].} = let hasUncles = uncles.len > 0 @@ -125,9 +126,8 @@ proc validateUncles(com: CommonRef; header: Header; uncleSet.incl uncleHash let - chainDB = com.db - recentAncestorHashes = ?chainDB.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) - recentUncleHashes = ?chainDB.getUncleHashes(recentAncestorHashes) + recentAncestorHashes = ?txFrame.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) + recentUncleHashes = ?txFrame.getUncleHashes(recentAncestorHashes) blockHash = header.blockHash for uncle in uncles: @@ -154,13 +154,13 @@ proc validateUncles(com: CommonRef; header: Header; return err("uncle block number larger than current block number") # check uncle against own parent - let parent = ?chainDB.getBlockHeader(uncle.parentHash) + let parent = ?txFrame.getBlockHeader(uncle.parentHash) if uncle.timestamp <= parent.timestamp: return err("Uncle's parent must me older") - let uncleParent = ?chainDB.getBlockHeader(uncle.parentHash) + let uncleParent = ?txFrame.getBlockHeader(uncle.parentHash) ? com.validateHeader( - Block.init(uncle, BlockBody()), uncleParent) + Block.init(uncle, BlockBody()), uncleParent, txFrame) ok() @@ -364,6 +364,7 @@ proc validateHeaderAndKinship*( com: CommonRef; blk: Block; parent: Header; + txFrame: CoreDbTxRef ): Result[void, string] {.gcsafe, raises: [].} = template header: Header = blk.header @@ -373,13 +374,13 @@ proc validateHeaderAndKinship*( return err("Header.extraData larger than 32 bytes") return ok() - ? com.validateHeader(blk, parent) + ? com.validateHeader(blk, parent, txFrame) if blk.uncles.len > MAX_UNCLES: return err("Number of uncles exceed limit.") - if not com.proofOfStake(header): - ? com.validateUncles(header, blk.uncles) + if not com.proofOfStake(header, txFrame): + ? com.validateUncles(header, txFrame, blk.uncles) ok() diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim index d81c835716..52f4147dcc 100644 --- a/nimbus/db/aristo/aristo_api.nim +++ b/nimbus/db/aristo/aristo_api.nim @@ -49,7 +49,7 @@ type ## was any. AristoApiDeleteAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] {.noRaise.} @@ -58,7 +58,7 @@ type ## as well. AristoApiDeleteStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] @@ -71,7 +71,7 @@ type ## case only the function will return `true`. AristoApiDeleteStorageTreeFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] {.noRaise.} @@ -79,7 +79,7 @@ type ## associated to the account argument `accPath`. AristoApiFetchLastSavedStateFn* = - proc(db: AristoDbRef + proc(db: AristoTxRef ): Result[SavedState,AristoError] {.noRaise.} ## The function returns the state of the last saved state. This is a @@ -87,20 +87,20 @@ type ## (may be interpreted as block number.) AristoApiFetchAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] {.noRaise.} ## Fetch an account record from the database indexed by `accPath`. AristoApiFetchStateRootFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[Hash32,AristoError] {.noRaise.} ## Fetch the Merkle hash of the account root. AristoApiFetchStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] @@ -109,7 +109,7 @@ type ## record from the database indexed by `stoPath`. AristoApiFetchStorageRootFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] {.noRaise.} @@ -130,7 +130,7 @@ type ## This distructor may be used on already *destructed* descriptors. AristoApiForgetFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[void,AristoError] {.noRaise.} ## Destruct the non centre argument `db` descriptor (see comments on @@ -140,14 +140,14 @@ type ## also# comments on `fork()`.) AristoApiHashifyFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; ): Result[void,(VertexID,AristoError)] {.noRaise.} ## Add keys to the `Patricia Trie` so that it becomes a `Merkle ## Patricia Tree`. AristoApiHasPathAccountFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] {.noRaise.} @@ -155,7 +155,7 @@ type ## exists on the database. AristoApiHasPathStorageFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] @@ -164,29 +164,15 @@ type ## data record indexed by `stoPath` exists on the database. AristoApiHasStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] {.noRaise.} ## For a storage tree related to account `accPath`, query whether there ## is a non-empty data storage area at all. - AristoApiIsTopFn* = - proc(tx: AristoTxRef; - ): bool - {.noRaise.} - ## Getter, returns `true` if the argument `tx` referes to the current - ## top level transaction. - - AristoApiTxFrameLevelFn* = - proc(db: AristoDbRef; - ): int - {.noRaise.} - ## Getter, non-negative nesting level (i.e. number of pending - ## transactions) - AristoApiMergeAccountRecordFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; accRec: AristoAccount; ): Result[bool,AristoError] @@ -199,7 +185,7 @@ type ## `false` otherwise. AristoApiMergeStorageDataFn* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; stoData: UInt256; @@ -210,7 +196,7 @@ type ## and `stoPath` is the slot path of the corresponding storage area. AristoApiPartAccountTwig* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] {.noRaise.} @@ -222,7 +208,7 @@ type ## Errors will only be returned for invalid paths. AristoApiPartStorageTwig* = - proc(db: AristoDbRef; + proc(db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] @@ -285,24 +271,23 @@ type ## is returned if there was any. AristoApiTxFrameBeginFn* = - proc(db: AristoDbRef; + proc(db: AristoDbRef; parent: AristoTxRef ): Result[AristoTxRef,AristoError] {.noRaise.} ## Starts a new transaction. ## ## Example: ## :: - ## proc doSomething(db: AristoDbRef) = + ## proc doSomething(db: AristoTxRef) = ## let tx = db.begin ## defer: tx.rollback() ## ... continue using db ... ## tx.commit() - AristoApiTxFrameTopFn* = + AristoApiBaseTxFrameFn* = proc(db: AristoDbRef; - ): Result[AristoTxRef,AristoError] + ): AristoTxRef {.noRaise.} - ## Getter, returns top level transaction if there is any. AristoApiRef* = ref AristoApiObj AristoApiObj* = object of RootObj @@ -325,9 +310,6 @@ type hasPathStorage*: AristoApiHasPathStorageFn hasStorageData*: AristoApiHasStorageDataFn - isTop*: AristoApiIsTopFn - txFrameLevel*: AristoApiTxFrameLevelFn - mergeAccountRecord*: AristoApiMergeAccountRecordFn mergeStorageData*: AristoApiMergeStorageDataFn @@ -340,7 +322,7 @@ type persist*: AristoApiPersistFn rollback*: AristoApiRollbackFn txFrameBegin*: AristoApiTxFrameBeginFn - txFrameTop*: AristoApiTxFrameTopFn + baseTxFrame*: AristoApiBaseTxFrameFn AristoApiProfNames* = enum @@ -365,9 +347,6 @@ type AristoApiProfHasPathStorageFn = "hasPathStorage" AristoApiProfHasStorageDataFn = "hasStorageData" - AristoApiProfIsTopFn = "isTop" - AristoApiProfLevelFn = "level" - AristoApiProfMergeAccountRecordFn = "mergeAccountRecord" AristoApiProfMergeStorageDataFn = "mergeStorageData" @@ -380,7 +359,7 @@ type AristoApiProfPersistFn = "persist" AristoApiProfRollbackFn = "rollback" AristoApiProfTxFrameBeginFn = "txFrameBegin" - AristoApiProfTxFrameTopFn = "txFrameTop" + AristoApiProfBaseTxFrameFn = "baseTxFrame" AristoApiProfBeGetVtxFn = "be/getVtx" AristoApiProfBeGetKeyFn = "be/getKey" @@ -449,9 +428,6 @@ func init*(api: var AristoApiObj) = api.hasPathStorage = hasPathStorage api.hasStorageData = hasStorageData - api.isTop = isTop - api.txFrameLevel = txFrameLevel - api.mergeAccountRecord = mergeAccountRecord api.mergeStorageData = mergeStorageData @@ -464,7 +440,8 @@ func init*(api: var AristoApiObj) = api.persist = persist api.rollback = rollback api.txFrameBegin = txFrameBegin - api.txFrameTop = txFrameTop + api.baseTxFrame = baseTxFrame + when AutoValidateApiHooks: api.validate @@ -490,7 +467,7 @@ func init*( ## This constructor creates a profiling API descriptor to be derived from ## an initialised `api` argument descriptor. For profiling the DB backend, ## the field `.be` of the result descriptor must be assigned to the - ## `.backend` field of the `AristoDbRef` descriptor. + ## `.backend` field of the `AristoTxRef` descriptor. ## ## The argument desctiptors `api` and `be` will not be modified and can be ## used to restore the previous set up. @@ -511,92 +488,82 @@ func init*( result = api.commit(a) profApi.deleteAccountRecord = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfDeleteAccountRecordFn.profileRunner: result = api.deleteAccountRecord(a, b) profApi.deleteStorageData = - proc(a: AristoDbRef; b: Hash32, c: Hash32): auto = + proc(a: AristoTxRef; b: Hash32, c: Hash32): auto = AristoApiProfDeleteStorageDataFn.profileRunner: result = api.deleteStorageData(a, b, c) profApi.deleteStorageTree = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfDeleteStorageTreeFn.profileRunner: result = api.deleteStorageTree(a, b) profApi.fetchLastSavedState = - proc(a: AristoDbRef): auto = + proc(a: AristoTxRef): auto = AristoApiProfFetchLastSavedStateFn.profileRunner: result = api.fetchLastSavedState(a) profApi.fetchAccountRecord = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfFetchAccountRecordFn.profileRunner: result = api.fetchAccountRecord(a, b) profApi.fetchStateRoot = - proc(a: AristoDbRef; b: bool): auto = + proc(a: AristoTxRef; b: bool): auto = AristoApiProfFetchStateRootFn.profileRunner: result = api.fetchStateRoot(a, b) profApi.fetchStorageData = - proc(a: AristoDbRef; b, stoPath: Hash32): auto = + proc(a: AristoTxRef; b, stoPath: Hash32): auto = AristoApiProfFetchStorageDataFn.profileRunner: result = api.fetchStorageData(a, b, stoPath) profApi.fetchStorageRoot = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfFetchStorageRootFn.profileRunner: result = api.fetchStorageRoot(a, b) profApi.finish = - proc(a: AristoDbRef; b = false) = + proc(a: AristoTxRef; b = false) = AristoApiProfFinishFn.profileRunner: api.finish(a, b) profApi.hasPathAccount = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfHasPathAccountFn.profileRunner: result = api.hasPathAccount(a, b) profApi.hasPathStorage = - proc(a: AristoDbRef; b, c: Hash32): auto = + proc(a: AristoTxRef; b, c: Hash32): auto = AristoApiProfHasPathStorageFn.profileRunner: result = api.hasPathStorage(a, b, c) profApi.hasStorageData = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfHasStorageDataFn.profileRunner: result = api.hasStorageData(a, b) - profApi.isTop = - proc(a: AristoTxRef): auto = - AristoApiProfIsTopFn.profileRunner: - result = api.isTop(a) - - profApi.level = - proc(a: AristoDbRef): auto = - AristoApiProfLevelFn.profileRunner: - result = api.level(a) - profApi.mergeAccountRecord = - proc(a: AristoDbRef; b: Hash32; c: AristoAccount): auto = + proc(a: AristoTxRef; b: Hash32; c: AristoAccount): auto = AristoApiProfMergeAccountRecordFn.profileRunner: result = api.mergeAccountRecord(a, b, c) profApi.mergeStorageData = - proc(a: AristoDbRef; b, c: Hash32, d: UInt256): auto = + proc(a: AristoTxRef; b, c: Hash32, d: UInt256): auto = AristoApiProfMergeStorageDataFn.profileRunner: result = api.mergeStorageData(a, b, c, d) profApi.partAccountTwig = - proc(a: AristoDbRef; b: Hash32): auto = + proc(a: AristoTxRef; b: Hash32): auto = AristoApiProfPartAccountTwigFn.profileRunner: result = api.partAccountTwig(a, b) profApi.partStorageTwig = - proc(a: AristoDbRef; b: Hash32; c: Hash32): auto = + proc(a: AristoTxRef; b: Hash32; c: Hash32): auto = AristoApiProfPartStorageTwigFn.profileRunner: result = api.partStorageTwig(a, b, c) @@ -616,7 +583,7 @@ func init*( result = api.pathAsBlob(a) profApi.persist = - proc(a: AristoDbRef; b = 0u64): auto = + proc(a: AristoTxRef; b = 0u64): auto = AristoApiProfPersistFn.profileRunner: result = api.persist(a, b) @@ -626,14 +593,14 @@ func init*( result = api.rollback(a) profApi.txFrameBegin = - proc(a: AristoDbRef): auto = + proc(a: AristoTxRef): auto = AristoApiProfTxFrameBeginFn.profileRunner: result = api.txFrameBegin(a) - profApi.txFrameTop = - proc(a: AristoDbRef): auto = - AristoApiProfTxFrameTopFn.profileRunner: - result = api.txFrameTop(a) + profApi.baseTxFrame = + proc(a: AristoTxRef): auto = + AristoApiProfBaseTxFrameFn.profileRunner: + result = api.baseTxFrame(a) let beDup = be.dup() if beDup.isNil: diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim index 7bc68c50af..0c11d05a8d 100644 --- a/nimbus/db/aristo/aristo_blobify.nim +++ b/nimbus/db/aristo/aristo_blobify.nim @@ -13,6 +13,7 @@ import results, stew/[arrayops, endians2], + eth/common/accounts, ./aristo_desc export aristo_desc, results diff --git a/nimbus/db/aristo/aristo_check.nim b/nimbus/db/aristo/aristo_check.nim index 31f69f9679..ceb845985d 100644 --- a/nimbus/db/aristo/aristo_check.nim +++ b/nimbus/db/aristo/aristo_check.nim @@ -25,7 +25,7 @@ import # ------------------------------------------------------------------------------ proc checkTop*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer proofMode = false; # Has proof nodes ): Result[void,(VertexID,AristoError)] = ## Verify that the cache structure is correct as it would be after `merge()` @@ -76,18 +76,18 @@ proc checkBE*( proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database relax = false; # Check existing hashes only cache = true; # Also verify against top layer cache proofMode = false; # Has proof nodes ): Result[void,(VertexID,AristoError)] = ## Shortcut for running `checkTop()` followed by `checkBE()` ? db.checkTop(proofMode = proofMode) - ? db.checkBE() + # ? db.checkBE() ok() proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key ): Result[void,AristoError] = ## Check accounts tree path `accPath` against portal proof generation and @@ -98,7 +98,7 @@ proc check*( db.checkTwig(accPath) proc check*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key stoPath: Hash32; # Storage key ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim index 8e830fb469..792d71845c 100644 --- a/nimbus/db/aristo/aristo_check/check_be.nim +++ b/nimbus/db/aristo/aristo_check/check_be.nim @@ -51,7 +51,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( for (rvid,key) in T.walkKeyBe db: if topVidBe.vid < rvid.vid: topVidBe = rvid - let _ = db.getVtxBE(rvid).valueOr: + let _ = db.getVtxBe(rvid).valueOr: return err((rvid.vid,CheckBeVtxMissing)) # Compare calculated `vTop` against database state @@ -76,25 +76,25 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( block: var topVidCache: RootedVertexID = (VertexID(0), VertexID(0)) - # Check structural table - for (rvid,vtx) in db.layersWalkVtx: - if vtx.isValid and topVidCache.vid < rvid.vid: - topVidCache = rvid - let (key, _) = db.layersGetKey(rvid).valueOr: (VOID_HASH_KEY, 0) - if not vtx.isValid: - # Some vertex is to be deleted, the key must be empty - if key.isValid: - return err((rvid.vid,CheckBeCacheKeyNonEmpty)) + # # Check structural table + # for (rvid,vtx) in db.layersWalkVtx: + # if vtx.isValid and topVidCache.vid < rvid.vid: + # topVidCache = rvid + # let (key, _) = db.layersGetKey(rvid).valueOr: (VOID_HASH_KEY, 0) + # if not vtx.isValid: + # # Some vertex is to be deleted, the key must be empty + # if key.isValid: + # return err((rvid.vid,CheckBeCacheKeyNonEmpty)) - # Check key table - var list: seq[RootedVertexID] - for (rvid,key) in db.layersWalkKey: - if key.isValid and topVidCache.vid < rvid.vid: - topVidCache = rvid - list.add rvid - let vtx = db.getVtx rvid - if db.layersGetVtx(rvid).isErr and not vtx.isValid: - return err((rvid.vid,CheckBeCacheKeyDangling)) + # # Check key table + # var list: seq[RootedVertexID] + # for (rvid,key) in db.layersWalkKey: + # if key.isValid and topVidCache.vid < rvid.vid: + # topVidCache = rvid + # list.add rvid + # let vtx = db.getVtx rvid + # if db.layersGetVtx(rvid).isErr and not vtx.isValid: + # return err((rvid.vid,CheckBeCacheKeyDangling)) # Check vTop # TODO diff --git a/nimbus/db/aristo/aristo_check/check_top.nim b/nimbus/db/aristo/aristo_check/check_top.nim index 331db8a02a..0ebbee9a23 100644 --- a/nimbus/db/aristo/aristo_check/check_top.nim +++ b/nimbus/db/aristo/aristo_check/check_top.nim @@ -21,7 +21,7 @@ import # ------------------------------------------------------------------------------ proc checkTopStrict*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer ): Result[void,(VertexID,AristoError)] = # No need to specify zero keys if implied by a leaf path with valid target # vertex ID (i.e. not deleted). @@ -55,7 +55,7 @@ proc checkTopStrict*( proc checkTopProofMode*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer ): Result[void,(VertexID,AristoError)] = for (rvid,key) in db.layersWalkKey: if key.isValid: # Otherwise to be deleted @@ -69,13 +69,13 @@ proc checkTopProofMode*( proc checkTopCommon*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer ): Result[void,(VertexID,AristoError)] = # Some `kMap[]` entries may ne void indicating backend deletion let kMapCount = db.layersWalkKey.toSeq.mapIt(it[1]).filterIt(it.isValid).len kMapNilCount = db.layersWalkKey.toSeq.len - kMapCount - vTop = db.vTop + vTop = db.layer.vTop var topVid = VertexID(0) stoRoots: HashSet[VertexID] diff --git a/nimbus/db/aristo/aristo_check/check_twig.nim b/nimbus/db/aristo/aristo_check/check_twig.nim index 664b9ec33a..a9c2f2108f 100644 --- a/nimbus/db/aristo/aristo_check/check_twig.nim +++ b/nimbus/db/aristo/aristo_check/check_twig.nim @@ -20,7 +20,7 @@ import # ------------------------------------------------------------------------------ proc checkTwig*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Data path ): Result[void,AristoError] = let @@ -31,7 +31,7 @@ proc checkTwig*( ok() proc checkTwig*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Account key stoPath: Hash32; # Storage key ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim index ecb4c12c99..67834916e0 100644 --- a/nimbus/db/aristo/aristo_compute.nim +++ b/nimbus/db/aristo/aristo_compute.nim @@ -63,7 +63,7 @@ func leave(batch: var WriteBatch, nibble: uint8) = batch.depth -= 1 proc putKeyAtLevel( - db: AristoDbRef, + db: AristoTxRef, rvid: RootedVertexID, vtx: VertexRef, key: HashKey, @@ -76,10 +76,10 @@ proc putKeyAtLevel( ## corresponding hash!) if level == -2: - ?batch.putVtx(db, rvid, vtx, key) + ?batch.putVtx(db.db, rvid, vtx, key) if batch.count mod batchSize == 0: - ?batch.flush(db) + ?batch.flush(db.db) if batch.count mod (batchSize * 100) == 0: info "Writing computeKey cache", keys = batch.count, accounts = batch.progress @@ -121,10 +121,10 @@ template encodeExt(w: var RlpWriter, pfx: NibblesBuf, branchKey: HashKey): HashK w.finish().digestTo(HashKey) proc getKey( - db: AristoDbRef, rvid: RootedVertexID, skipLayers: static bool + db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool ): Result[((HashKey, VertexRef), int), AristoError] = ok when skipLayers: - (?db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2) + (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2) else: ?db.getKeyRc(rvid, {}) @@ -140,7 +140,7 @@ template childVid(v: VertexRef): VertexID = v.startVid proc computeKeyImpl( - db: AristoDbRef, + db: AristoTxRef, rvid: RootedVertexID, batch: var WriteBatch, vtx: VertexRef, @@ -277,11 +277,11 @@ proc computeKeyImpl( ok (key, level) proc computeKeyImpl( - db: AristoDbRef, rvid: RootedVertexID, skipLayers: static bool + db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool ): Result[HashKey, AristoError] = let (keyvtx, level) = when skipLayers: - (?db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2) + (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2) else: ?db.getKeyRc(rvid, {}) @@ -291,7 +291,7 @@ proc computeKeyImpl( var batch: WriteBatch let res = computeKeyImpl(db, rvid, batch, keyvtx[1], level, skipLayers = skipLayers) if res.isOk: - ?batch.flush(db) + ?batch.flush(db.db) if batch.count > 0: if batch.count >= batchSize * 100: @@ -302,7 +302,7 @@ proc computeKeyImpl( ok (?res)[0] proc computeKey*( - db: AristoDbRef, # Database, top layer + db: AristoTxRef, # Database, top layer rvid: RootedVertexID, # Vertex to convert ): Result[HashKey, AristoError] = ## Compute the key for an arbitrary vertex ID. If successful, the length of @@ -312,7 +312,7 @@ proc computeKey*( ## 32 byte value. computeKeyImpl(db, rvid, skipLayers = false) -proc computeKeys*(db: AristoDbRef, root: VertexID): Result[void, AristoError] = +proc computeKeys*(db: AristoTxRef, root: VertexID): Result[void, AristoError] = ## Ensure that key cache is topped up with the latest state root discard db.computeKeyImpl((root, root), skipLayers = true) diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim index bec16559f5..aa93e627d7 100644 --- a/nimbus/db/aristo/aristo_debug.nim +++ b/nimbus/db/aristo/aristo_debug.nim @@ -24,8 +24,8 @@ import # Private functions # ------------------------------------------------------------------------------ -func orDefault(db: AristoDbRef): AristoDbRef = - if db.isNil: AristoDbRef(top: LayerRef.init()) else: db +func orDefault(db: AristoTxRef): AristoTxRef = + if db.isNil: AristoTxRef(layer: LayerRef.init()) else: db # -------------------------- @@ -80,17 +80,17 @@ func stripZeros(a: string; toExp = false): string = # --------------------- func ppKeyOk( - db: AristoDbRef; + db: AristoTxRef; key: HashKey; rvid: RootedVertexID; ): string = if key.isValid and rvid.isValid: - let rv = db.xMap.getOrVoid key + let rv = db.db.xMap.getOrVoid key if rv.isValid: if rvid != rv: result = "(!)" return - db.xMap[key] = rvid + db.db.xMap[key] = rvid func ppVid(vid: VertexID; pfx = true): string = if pfx: @@ -130,7 +130,7 @@ func ppVidList(vLst: openArray[VertexID]): string = result &= vLst[^100 .. ^1].mapIt(it.ppVid).join(",") result &= "]" -proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = +proc ppKey(key: HashKey; db: AristoTxRef; pfx = true): string = if pfx: result = "£" if key.to(Hash32) == default(Hash32): @@ -139,7 +139,7 @@ proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = result &= "ø" else: # Reverse lookup - let rvid = db.xMap.getOrVoid key + let rvid = db.db.xMap.getOrVoid key if rvid.isValid: result &= rvid.ppVid(pfx=false) let vtx = db.getVtx rvid @@ -153,7 +153,7 @@ proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string = let tag = if key.len < 32: "[#" & $key.len & "]" else: "" result &= @(key.data).toHex.squeeze(hex=true,ignLen=true) & tag -func ppLeafTie(lty: LeafTie, db: AristoDbRef): string = +func ppLeafTie(lty: LeafTie, db: AristoTxRef): string = let pfx = lty.path.to(NibblesBuf) "@" & lty.root.ppVid(pfx=false) & ":" & ($pfx).squeeze(hex=true,ignLen=(pfx.len==64)) @@ -165,7 +165,7 @@ func ppPathPfx(pfx: NibblesBuf): string = func ppNibble(n: int8): string = if n < 0: "ø" elif n < 10: $n else: n.toHexLsb -proc ppEthAccount(a: Account, db: AristoDbRef): string = +proc ppEthAccount(a: Account, db: AristoTxRef): string = result = "(" result &= ($a.nonce).stripZeros(toExp=true) & "," result &= ($a.balance).stripZeros(toExp=true) & "," @@ -178,14 +178,14 @@ func ppAriAccount(a: AristoAccount): string = result &= ($a.balance).stripZeros(toExp=true) & "," result &= a.codeHash.ppCodeHash & ")" -func ppPayload(p: LeafPayload, db: AristoDbRef): string = +func ppPayload(p: LeafPayload, db: AristoTxRef): string = case p.pType: of AccountData: result = "(" & p.account.ppAriAccount() & "," & p.stoID.ppVid & ")" of StoData: result = ($p.stoData).squeeze -func ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string = +func ppVtx(nd: VertexRef, db: AristoTxRef, rvid: RootedVertexID): string = if not nd.isValid: result = "ø" else: @@ -210,7 +210,7 @@ func ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string = proc ppNode( nd: NodeRef; - db: AristoDbRef; + db: AristoTxRef; rvid = default(RootedVertexID); ): string = if not nd.isValid: @@ -253,7 +253,7 @@ proc ppNode( func ppXTab[T: VertexRef|NodeRef]( tab: Table[RootedVertexID,T]; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = proc ppT(v: T; r: RootedVertexID): string = @@ -268,7 +268,7 @@ func ppXTab[T: VertexRef|NodeRef]( proc ppXMap*( - db: AristoDbRef; + db: AristoTxRef; kMap: Table[RootedVertexID,HashKey]; indent: int; ): string = @@ -289,7 +289,7 @@ proc ppXMap*( if key == VOID_HASH_KEY: 0 else: - db.xMap[key] = w + db.db.xMap[key] = w let vtx = db.getVtx(w) if not vtx.isValid: 1 @@ -360,7 +360,7 @@ proc ppXMap*( proc ppBalancer( fl: LayerRef; - db: AristoDbRef; + db: AristoTxRef; indent: int; ): string = ## Walk over filter tables @@ -385,7 +385,7 @@ proc ppBalancer( result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db) & ")" result &= "}" -proc ppBe[T](be: T; db: AristoDbRef; limit: int; indent: int): string = +proc ppBe[T](be: T; db: AristoTxRef; limit: int; indent: int): string = ## Walk over backend tables let pfx = indent.toPfx @@ -435,7 +435,7 @@ proc ppBe[T](be: T; db: AristoDbRef; limit: int; indent: int): string = proc ppLayer( layer: LayerRef; - db: AristoDbRef; + db: AristoTxRef; vTopOk: bool; sTabOk: bool; kMapOk: bool; @@ -495,19 +495,19 @@ func pp*(w: Hash32; codeHashOk: bool): string = func pp*(n: NibblesBuf): string = n.ppPathPfx() -proc pp*(w: HashKey; db = AristoDbRef(nil)): string = +proc pp*(w: HashKey; db = AristoTxRef(nil)): string = w.ppKey(db.orDefault) -proc pp*(w: Hash32; db = AristoDbRef(nil)): string = +proc pp*(w: Hash32; db = AristoTxRef(nil)): string = w.to(HashKey).ppKey(db.orDefault) -proc pp*(w: openArray[HashKey]; db = AristoDbRef(nil)): string = +proc pp*(w: openArray[HashKey]; db = AristoTxRef(nil)): string = "[" & @w.mapIt(it.ppKey(db.orDefault)).join(",") & "]" -func pp*(lty: LeafTie, db = AristoDbRef(nil)): string = +func pp*(lty: LeafTie, db = AristoTxRef(nil)): string = lty.ppLeafTie(db.orDefault) -proc pp*(a: Account, db = AristoDbRef(nil)): string = +proc pp*(a: Account, db = AristoTxRef(nil)): string = a.ppEthAccount(db.orDefault) func pp*(vid: VertexID): string = @@ -519,13 +519,13 @@ func pp*(rvid: RootedVertexID): string = func pp*(vLst: openArray[VertexID]): string = vLst.ppVidList -func pp*(p: LeafPayload, db = AristoDbRef(nil)): string = +func pp*(p: LeafPayload, db = AristoTxRef(nil)): string = p.ppPayload(db.orDefault) -func pp*(nd: VertexRef, db = AristoDbRef(nil)): string = +func pp*(nd: VertexRef, db = AristoTxRef(nil)): string = nd.ppVtx(db.orDefault, default(RootedVertexID)) -proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string = +proc pp*(nd: NodeRef, db = AristoTxRef(nil)): string = nd.ppNode(db.orDefault, default(RootedVertexID)) func pp*(e: (VertexID,AristoError)): string = @@ -542,26 +542,26 @@ func pp*[T](rc: Result[T,(VertexID,AristoError)]): string = func pp*( sTab: Table[RootedVertexID,VertexRef]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = sTab.ppXTab(db.orDefault) -proc pp*(leg: Leg; root: VertexID; db = AristoDbRef(nil)): string = +proc pp*(leg: Leg; root: VertexID; db = AristoTxRef(nil)): string = let db = db.orDefault() result = "(" & leg.wp.vid.ppVid & "," block: let key = db.layersGetKeyOrVoid (root, leg.wp.vid) if not key.isValid: result &= "ø" - elif (root, leg.wp.vid) != db.xMap.getOrVoid key: + elif (root, leg.wp.vid) != db.db.xMap.getOrVoid key: result &= key.ppKey(db) result &= "," if 0 <= leg.nibble: result &= $leg.nibble.ppNibble result &= "," & leg.wp.vtx.pp(db) & ")" -proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string = +proc pp*(hike: Hike; db = AristoTxRef(nil); indent = 4): string = let db = db.orDefault() pfx = indent.toPfx(1) @@ -577,7 +577,7 @@ proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string = func pp*[T: NodeRef|VertexRef|HashKey]( q: seq[(HashKey,T)]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = let db = db.orDefault @@ -591,7 +591,7 @@ func pp*[T: NodeRef|VertexRef|HashKey]( func pp*[T: NodeRef|VertexRef|HashKey]( t: Table[HashKey,T]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = ## Sort hash keys by associated vertex ID were possible @@ -611,7 +611,7 @@ func pp*[T: NodeRef|VertexRef|HashKey]( proc pp*[T: HashKey]( t: Table[T,RootedVertexID]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = ## Sort by second tab item vertex ID @@ -638,14 +638,14 @@ proc pp*[T: HashKey]( func pp*[T: HashKey]( t: TableRef[HashKey,T]; - db = AristoDbRef(nil); + db = AristoTxRef(nil); indent = 4; ): string = pp(t[],db,indent) proc pp*( kMap: Table[RootedVertexID,HashKey]; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = db.ppXMap(kMap, indent) @@ -653,18 +653,18 @@ proc pp*( # --------------------- func pp*(tx: AristoTxRef): string = - result = "(uid=" & $tx.txUid & ",level=" & $tx.level + result = "(" & repr(pointer(addr(tx[]))) if not tx.parent.isNil: - result &= ", par=" & $tx.parent.txUid + result &= ", par=" & pp(tx.parent) result &= ")" -func pp*(wp: VidVtxPair; db: AristoDbRef): string = +func pp*(wp: VidVtxPair; db: AristoTxRef): string = "(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")" proc pp*( layer: LayerRef; - db: AristoDbRef; + db: AristoTxRef; indent = 4; sTabOk = true, kMapOk = true, @@ -675,11 +675,11 @@ proc pp*( proc pp*( be: BackendRef; - db: AristoDbRef; + db: AristoTxRef; limit = 100; indent = 4; ): string = - result = db.balancer.ppBalancer(db, indent+1) & indent.toPfx + result = db.layer.ppBalancer(db, indent+1) & indent.toPfx case be.kind: of BackendMemory: result &= be.MemBackendRef.ppBe(db, limit, indent+1) @@ -689,7 +689,7 @@ proc pp*( result &= "" proc pp*( - db: AristoDbRef; + db: AristoTxRef; indent = 4; backendOk = false; balancerOk = true; @@ -699,29 +699,29 @@ proc pp*( sTabOk = true; limit = 100; ): string = - if topOk: - result = db.layersCc.ppLayer( - db, sTabOk=sTabOk, kMapOk=kMapOk, vTopOk=true, indent=indent) - let stackOnlyOk = stackOk and not (topOk or balancerOk or backendOk) - if not stackOnlyOk: - result &= indent.toPfx(1) & "level=" & $db.stack.len - if (stackOk and 0 < db.stack.len) or stackOnlyOk: - let layers = @[db.top] & db.stack.reversed - var lStr = "" - for n,w in layers: - let - m = layers.len - n - 1 - l = db.layersCc m - a = w.kMap.values.toSeq.filterIt(not it.isValid).len - c = l.kMap.values.toSeq.filterIt(not it.isValid).len - result &= "(" & $(w.kMap.len - a) & "," & $a & ")" - lStr &= " " & $m & "=(" & $(l.kMap.len - c) & "," & $c & ")" - result &= " =>" & lStr - if backendOk: - result &= indent.toPfx & db.backend.pp(db, limit=limit, indent) - elif balancerOk: - result &= indent.toPfx & db.balancer.ppBalancer(db, indent+1) - + # if topOk: + # result = db.layersCc.ppLayer( + # db, sTabOk=sTabOk, kMapOk=kMapOk, vTopOk=true, indent=indent) + # let stackOnlyOk = stackOk and not (topOk or balancerOk or backendOk) + # if not stackOnlyOk: + # result &= indent.toPfx(1) & "level=" & $db.stack.len + # if (stackOk and 0 < db.stack.len) or stackOnlyOk: + # let layers = @[db.top] & db.stack.reversed + # var lStr = "" + # for n,w in layers: + # let + # m = layers.len - n - 1 + # l = db.layersCc m + # a = w.kMap.values.toSeq.filterIt(not it.isValid).len + # c = l.kMap.values.toSeq.filterIt(not it.isValid).len + # result &= "(" & $(w.kMap.len - a) & "," & $a & ")" + # lStr &= " " & $m & "=(" & $(l.kMap.len - c) & "," & $c & ")" + # result &= " =>" & lStr + # if backendOk: + # result &= indent.toPfx & db.backend.pp(db, limit=limit, indent) + # elif balancerOk: + # result &= indent.toPfx & db.balancer.ppBalancer(db, indent+1) + discard #TODO # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim index 3223e4ed63..91449fb1df 100644 --- a/nimbus/db/aristo/aristo_delete.nim +++ b/nimbus/db/aristo/aristo_delete.nim @@ -46,7 +46,7 @@ proc branchStillNeeded(vtx: VertexRef, removed: int8): Result[int8,void] = # ------------------------------------------------------------------------------ proc deleteImpl( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer hike: Hike; # Fully expanded path ): Result[VertexRef,AristoError] = ## Removes the last node in the hike and returns the updated leaf in case @@ -126,7 +126,7 @@ proc deleteImpl( # ------------------------------------------------------------------------------ proc deleteAccountRecord*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] = ## Delete the account leaf entry addressed by the argument `path`. If this @@ -156,7 +156,7 @@ proc deleteAccountRecord*( ok() proc deleteGenericData*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: openArray[byte]; ): Result[bool,AristoError] = @@ -187,7 +187,7 @@ proc deleteGenericData*( ok(not db.getVtx((root, root)).isValid) proc deleteGenericTree*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer root: VertexID; # Root vertex ): Result[void,AristoError] = ## Variant of `deleteGenericData()` for purging the whole MPT sub-tree. @@ -203,7 +203,7 @@ proc deleteGenericTree*( db.delSubTreeImpl root proc deleteStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; # Implies storage data tree stoPath: Hash32; ): Result[bool,AristoError] = @@ -266,7 +266,7 @@ proc deleteStorageData*( ok(true) proc deleteStorageTree*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Implies storage data tree ): Result[void,AristoError] = ## Variant of `deleteStorageData()` for purging the whole storage tree diff --git a/nimbus/db/aristo/aristo_delete/delete_subtree.nim b/nimbus/db/aristo/aristo_delete/delete_subtree.nim index 659a6b9ed2..4a080f43ca 100644 --- a/nimbus/db/aristo/aristo_delete/delete_subtree.nim +++ b/nimbus/db/aristo/aristo_delete/delete_subtree.nim @@ -19,7 +19,7 @@ import # ------------------------------------------------------------------------------ proc delSubTreeNow( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; ): Result[void,AristoError] = ## Delete sub-tree now @@ -38,7 +38,7 @@ proc delSubTreeNow( proc delStoTreeNow( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer rvid: RootedVertexID; # Root vertex accPath: Hash32; # Accounts cache designator stoPath: NibblesBuf; # Current storage path @@ -70,14 +70,14 @@ proc delStoTreeNow( # ------------------------------------------------------------------------------ proc delSubTreeImpl*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; ): Result[void,AristoError] = db.delSubTreeNow (root,root) proc delStoTreeImpl*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer rvid: RootedVertexID; # Root vertex accPath: Hash32; ): Result[void,AristoError] = diff --git a/nimbus/db/aristo/aristo_delta.nim b/nimbus/db/aristo/aristo_delta.nim index 3e066c42bd..142e2a882a 100644 --- a/nimbus/db/aristo/aristo_delta.nim +++ b/nimbus/db/aristo/aristo_delta.nim @@ -31,7 +31,7 @@ proc deltaPersistent*( db: AristoDbRef; # Database nxtFid = 0u64; # Next filter ID (if any) ): Result[void,AristoError] = - ## Resolve (i.e. move) the balancer into the physical backend database. + ## Resolve (i.e. move) txRef into the physical backend database. ## ## This needs write permission on the backend DB for the descriptor argument ## `db` (see the function `aristo_desc.isCentre()`.) If the argument flag @@ -47,7 +47,7 @@ proc deltaPersistent*( return err(FilBackendMissing) # Blind or missing filter - if db.balancer.isNil: + if db.txRef.isNil: # Add a blind storage frame. This will do no harm if `Aristo` runs # standalone. Yet it is needed if a `Kvt` is tied to `Aristo` and has # triggered a save cyle already which is to be completed here. @@ -60,30 +60,33 @@ proc deltaPersistent*( return ok() let lSst = SavedState( - key: EMPTY_ROOT_HASH, # placeholder for more + key: emptyRoot, # placeholder for more serial: nxtFid) # Store structural single trie entries let writeBatch = ? be.putBegFn() - for rvid, vtx in db.balancer.sTab: - db.balancer.kMap.withValue(rvid, key) do: + for rvid, vtx in db.txRef.layer.sTab: + db.txRef.layer.kMap.withValue(rvid, key) do: be.putVtxFn(writeBatch, rvid, vtx, key[]) do: be.putVtxFn(writeBatch, rvid, vtx, default(HashKey)) - be.putTuvFn(writeBatch, db.balancer.vTop) + be.putTuvFn(writeBatch, db.txRef.layer.vTop) be.putLstFn(writeBatch, lSst) ? be.putEndFn writeBatch # Finalise write batch # Copy back updated payloads - for accPath, vtx in db.balancer.accLeaves: + for accPath, vtx in db.txRef.layer.accLeaves: db.accLeaves.put(accPath, vtx) - for mixPath, vtx in db.balancer.stoLeaves: + for mixPath, vtx in db.txRef.layer.stoLeaves: db.stoLeaves.put(mixPath, vtx) - # Done with balancer, all saved to backend - db.balancer = LayerRef(nil) + # Done with txRef, all saved to backend + db.txRef.layer.sTab.clear() + db.txRef.layer.kMap.clear() + db.txRef.layer.accLeaves.clear() + db.txRef.layer.stoLeaves.clear() ok() diff --git a/nimbus/db/aristo/aristo_delta/delta_merge.nim b/nimbus/db/aristo/aristo_delta/delta_merge.nim deleted file mode 100644 index 7e86f7140c..0000000000 --- a/nimbus/db/aristo/aristo_delta/delta_merge.nim +++ /dev/null @@ -1,44 +0,0 @@ -# nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -import - ".."/[aristo_desc, aristo_layers] - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc deltaMerge*( - upper: LayerRef; # Think of `top`, `nil` is ok - lower: LayerRef; # Think of `balancer`, `nil` is ok - ): LayerRef = - ## Merge argument `upper` into the `lower` filter instance. - ## - ## Note that the namimg `upper` and `lower` indicate that the filters are - ## stacked and the database access is `upper -> lower -> backend`. - ## - if lower.isNil: - # Degenerate case: `upper` is void - upper - - elif upper.isNil: - # Degenerate case: `lower` is void - lower - - else: - # Can modify `lower` which is the prefered action mode but applies only - # in cases where the `lower` argument is not shared. - lower.vTop = upper.vTop - layersMergeOnto(upper, lower[]) - lower - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_desc.nim b/nimbus/db/aristo/aristo_desc.nim index fdb738986b..f69117c6b5 100644 --- a/nimbus/db/aristo/aristo_desc.nim +++ b/nimbus/db/aristo/aristo_desc.nim @@ -43,17 +43,13 @@ type ## Transaction descriptor db*: AristoDbRef ## Database descriptor parent*: AristoTxRef ## Previous transaction - txUid*: uint ## Unique ID among transactions - level*: int ## Stack index for this transaction + layer*: LayerRef AristoDbRef* = ref object ## Three tier database object supporting distributed instances. - top*: LayerRef ## Database working layer, mutable - stack*: seq[LayerRef] ## Stashed immutable parent layers - balancer*: LayerRef ## Balance out concurrent backend access backend*: BackendRef ## Backend database (may well be `nil`) - txRef*: AristoTxRef ## Latest active transaction + txRef*: AristoTxRef ## Bottom-most in-memory frame txUidGen*: uint ## Tx-relative unique number generator accLeaves*: LruCache[Hash32, VertexRef] @@ -128,7 +124,7 @@ func isValid*(layer: LayerRef): bool = layer != LayerRef(nil) func isValid*(root: Hash32): bool = - root != EMPTY_ROOT_HASH + root != emptyRoot func isValid*(key: HashKey): bool = assert key.len != 32 or key.to(Hash32).isValid @@ -156,25 +152,31 @@ func hash*(db: AristoDbRef): Hash = # Public helpers # ------------------------------------------------------------------------------ -iterator rstack*(db: AristoDbRef): LayerRef = +iterator rstack*(tx: AristoTxRef): (LayerRef, int) = # Stack in reverse order - for i in 0.. 0: - doAssert level <= db.stack.len - db.stack[^level] - elif level == -1: - doAssert db.balancer != nil - db.balancer - elif level == -2: + var tx = tx + + var i = 0 + while tx != nil: + let level = if tx.parent == nil: -1 else: i + yield (tx.layer, level) + tx = tx.parent + +proc deltaAtLevel*(db: AristoTxRef, level: int): LayerRef = + if level == -2: nil + elif level == -1: + db.db.txRef.layer else: - raiseAssert "Unknown level " & $level + var + frame = db + level = level + + while level > 0: + frame = frame.parent + level -= 1 + frame.layer # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 309feaf647..e2fa26d2e6 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -94,8 +94,8 @@ type key*: Hash32 ## Some state hash (if any) serial*: uint64 ## Generic identifier from application - LayerRef* = ref LayerObj - LayerObj* = object + LayerRef* = ref Layer + Layer* = object ## Delta layers are stacked implying a tables hierarchy. Table entries on ## a higher level take precedence over lower layer table entries. So an ## existing key-value table entry of a layer on top supersedes same key @@ -125,8 +125,6 @@ type accLeaves*: Table[Hash32, VertexRef] ## Account path -> VertexRef stoLeaves*: Table[Hash32, VertexRef] ## Storage path -> VertexRef - txUid*: uint ## Transaction identifier if positive - GetVtxFlag* = enum PeekCache ## Peek into, but don't update cache - useful on work loads that are diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim index ecca2c34be..ddb58f5707 100644 --- a/nimbus/db/aristo/aristo_fetch.nim +++ b/nimbus/db/aristo/aristo_fetch.nim @@ -24,7 +24,7 @@ import # ------------------------------------------------------------------------------ proc retrieveLeaf( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: Hash32; ): Result[VertexRef,AristoError] = @@ -39,22 +39,22 @@ proc retrieveLeaf( return err(FetchPathNotFound) -proc cachedAccLeaf*(db: AristoDbRef; accPath: Hash32): Opt[VertexRef] = +proc cachedAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] = # Return vertex from layers or cache, `nil` if it's known to not exist and # none otherwise db.layersGetAccLeaf(accPath) or - db.accLeaves.get(accPath) or + db.db.accLeaves.get(accPath) or Opt.none(VertexRef) -proc cachedStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] = +proc cachedStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] = # Return vertex from layers or cache, `nil` if it's known to not exist and # none otherwise db.layersGetStoLeaf(mixPath) or - db.stoLeaves.get(mixPath) or + db.db.stoLeaves.get(mixPath) or Opt.none(VertexRef) proc retrieveAccountLeaf( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[VertexRef,AristoError] = if (let leafVtx = db.cachedAccLeaf(accPath); leafVtx.isSome()): @@ -67,27 +67,27 @@ proc retrieveAccountLeaf( let leafVtx = db.retrieveLeaf(VertexID(1), accPath).valueOr: if error == FetchPathNotFound: - db.accLeaves.put(accPath, nil) + db.db.accLeaves.put(accPath, nil) return err(error) - db.accLeaves.put(accPath, leafVtx) + db.db.accLeaves.put(accPath, leafVtx) ok leafVtx proc retrieveMerkleHash( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; ): Result[Hash32,AristoError] = let key = db.computeKey((root, root)).valueOr: if error in [GetVtxNotFound, GetKeyNotFound]: - return ok(EMPTY_ROOT_HASH) + return ok(emptyRoot) return err(error) ok key.to(Hash32) proc hasAccountPayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = let error = db.retrieveAccountLeaf(accPath).errorOr: @@ -98,7 +98,7 @@ proc hasAccountPayload( err(error) proc fetchStorageIdImpl( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; enaStoRootMissing = false; ): Result[VertexID,AristoError] = @@ -119,7 +119,7 @@ proc fetchStorageIdImpl( # ------------------------------------------------------------------------------ proc fetchAccountHike*( - db: AristoDbRef; # Database + db: AristoTxRef; # Database accPath: Hash32; # Implies a storage ID (if any) accHike: var Hike ): Result[void,AristoError] = @@ -142,7 +142,7 @@ proc fetchAccountHike*( ok() proc fetchStorageID*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[VertexID,AristoError] = ## Public helper function for retrieving a storage (vertex) ID for a given account. This @@ -152,7 +152,7 @@ proc fetchStorageID*( db.fetchStorageIdImpl(accPath, enaStoRootMissing=true) proc retrieveStoragePayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -167,15 +167,15 @@ proc retrieveStoragePayload( # it must have been in the database let leafVtx = db.retrieveLeaf(? db.fetchStorageIdImpl(accPath), stoPath).valueOr: if error == FetchPathNotFound: - db.stoLeaves.put(mixPath, nil) + db.db.stoLeaves.put(mixPath, nil) return err(error) - db.stoLeaves.put(mixPath, leafVtx) + db.db.stoLeaves.put(mixPath, leafVtx) ok leafVtx.lData.stoData proc hasStoragePayload( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] = @@ -191,15 +191,16 @@ proc hasStoragePayload( # ------------------------------------------------------------------------------ proc fetchLastSavedState*( - db: AristoDbRef; + db: AristoTxRef; ): Result[SavedState,AristoError] = - ## Wrapper around `getLstUbe()`. The function returns the state of the last + ## Wrapper around `getLstBe()`. The function returns the state of the last ## saved state. This is a Merkle hash tag for vertex with ID 1 and a bespoke ## `uint64` identifier (may be interpreted as block number.) - db.getLstUbe() + # TODO store in frame!! + db.db.getLstBe() proc fetchAccountRecord*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] = ## Fetch an account record from the database indexed by `accPath`. @@ -210,13 +211,13 @@ proc fetchAccountRecord*( ok leafVtx.lData.account proc fetchStateRoot*( - db: AristoDbRef; + db: AristoTxRef; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the account root. db.retrieveMerkleHash(VertexID(1)) proc hasPathAccount*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = ## For an account record indexed by `accPath` query whether this record exists @@ -225,7 +226,7 @@ proc hasPathAccount*( db.hasAccountPayload(accPath) proc fetchStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -235,18 +236,18 @@ proc fetchStorageData*( db.retrieveStoragePayload(accPath, stoPath) proc fetchStorageRoot*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the storage root related to `accPath`. let stoID = db.fetchStorageIdImpl(accPath).valueOr: if error == FetchPathNotFound: - return ok(EMPTY_ROOT_HASH) # no sub-tree + return ok(emptyRoot) # no sub-tree return err(error) db.retrieveMerkleHash(stoID) proc hasPathStorage*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] = @@ -256,7 +257,7 @@ proc hasPathStorage*( db.hasStoragePayload(accPath, stoPath) proc hasStorageData*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[bool,AristoError] = ## For a storage tree related to account `accPath`, query whether there diff --git a/nimbus/db/aristo/aristo_get.nim b/nimbus/db/aristo/aristo_get.nim index d3e83be377..a0ebc6c725 100644 --- a/nimbus/db/aristo/aristo_get.nim +++ b/nimbus/db/aristo/aristo_get.nim @@ -21,16 +21,16 @@ import # Public functions # ------------------------------------------------------------------------------ -proc getTuvUbe*( +proc getTuvBe*( db: AristoDbRef; ): Result[VertexID,AristoError] = - ## Get the ID generator state from the unfiltered backened if available. + ## Get the ID generator state from the backened if available. let be = db.backend if not be.isNil: return be.getTuvFn() err(GetTuvNotFound) -proc getLstUbe*( +proc getLstBe*( db: AristoDbRef; ): Result[SavedState,AristoError] = ## Get the last saved state @@ -39,23 +39,23 @@ proc getLstUbe*( return be.getLstFn() err(GetLstNotFound) -proc getVtxUbe*( +proc getVtxBe*( db: AristoDbRef; rvid: RootedVertexID; flags: set[GetVtxFlag] = {}; ): Result[VertexRef,AristoError] = - ## Get the vertex from the unfiltered backened if available. + ## Get the vertex from the backened if available. let be = db.backend if not be.isNil: return be.getVtxFn(rvid, flags) err GetVtxNotFound -proc getKeyUbe*( +proc getKeyBe*( db: AristoDbRef; rvid: RootedVertexID; flags: set[GetVtxFlag]; ): Result[(HashKey, VertexRef),AristoError] = - ## Get the Merkle hash/key from the unfiltered backend if available. + ## Get the Merkle hash/key from the backend if available. let be = db.backend if not be.isNil: return be.getKeyFn(rvid, flags) @@ -63,47 +63,8 @@ proc getKeyUbe*( # ------------------ -proc getTuvBE*( - db: AristoDbRef; - ): Result[VertexID,AristoError] = - ## Get the ID generator state the `backened` layer if available. - if not db.balancer.isNil: - return ok(db.balancer.vTop) - db.getTuvUbe() - -proc getVtxBE*( - db: AristoDbRef; - rvid: RootedVertexID; - flags: set[GetVtxFlag] = {}; - ): Result[(VertexRef, int),AristoError] = - ## Get the vertex from the (filtered) backened if available. - if not db.balancer.isNil: - db.balancer.sTab.withValue(rvid, w): - if w[].isValid: - return ok (w[], -1) - return err(GetVtxNotFound) - ok (? db.getVtxUbe(rvid, flags), -2) - -proc getKeyBE*( - db: AristoDbRef; - rvid: RootedVertexID; - flags: set[GetVtxFlag]; - ): Result[((HashKey, VertexRef), int),AristoError] = - ## Get the merkle hash/key from the (filtered) backend if available. - if not db.balancer.isNil: - db.balancer.kMap.withValue(rvid, w): - if w[].isValid: - return ok(((w[], nil), -1)) - db.balancer.sTab.withValue(rvid, s): - if s[].isValid: - return ok(((VOID_HASH_KEY, s[]), -1)) - return err(GetKeyNotFound) - ok ((?db.getKeyUbe(rvid, flags)), -2) - -# ------------------ - proc getVtxRc*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; flags: set[GetVtxFlag] = {}; ): Result[(VertexRef, int),AristoError] = @@ -120,16 +81,16 @@ proc getVtxRc*( else: return err(GetVtxNotFound) - db.getVtxBE(rvid, flags) + ok (?db.db.getVtxBe(rvid, flags), -2) -proc getVtx*(db: AristoDbRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef = +proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef = ## Cascaded attempt to fetch a vertex from the cache layers or the backend. ## The function returns `nil` on error or failure. ## db.getVtxRc(rvid).valueOr((VertexRef(nil), 0))[0] proc getKeyRc*( - db: AristoDbRef; rvid: RootedVertexID, flags: set[GetVtxFlag]): Result[((HashKey, VertexRef), int),AristoError] = + db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag]): Result[((HashKey, VertexRef), int),AristoError] = ## Cascaded attempt to fetch a Merkle hash from the cache layers or the ## backend. This function will never return a `VOID_HASH_KEY` but rather ## some `GetKeyNotFound` or `GetKeyUpdateNeeded` error. @@ -154,9 +115,9 @@ proc getKeyRc*( # The vertex is to be deleted. So is the value key. return err(GetKeyNotFound) - db.getKeyBE(rvid, flags) + ok (?db.db.getKeyBe(rvid, flags), -2) -proc getKey*(db: AristoDbRef; rvid: RootedVertexID): HashKey = +proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey = ## Cascaded attempt to fetch a vertex from the cache layers or the backend. ## The function returns `nil` on error or failure. ## diff --git a/nimbus/db/aristo/aristo_hike.nim b/nimbus/db/aristo/aristo_hike.nim index f627b158e4..07c9d844b7 100644 --- a/nimbus/db/aristo/aristo_hike.nim +++ b/nimbus/db/aristo/aristo_hike.nim @@ -62,7 +62,7 @@ func legsTo*(hike: Hike; numLegs: int; T: type NibblesBuf): T = # -------- proc step*( - path: NibblesBuf, rvid: RootedVertexID, db: AristoDbRef + path: NibblesBuf, rvid: RootedVertexID, db: AristoTxRef ): Result[(VertexRef, NibblesBuf, VertexID), AristoError] = # Fetch next vertex let (vtx, _) = db.getVtxRc(rvid).valueOr: @@ -102,7 +102,7 @@ proc step*( iterator stepUp*( path: NibblesBuf; # Partial path root: VertexID; # Start vertex - db: AristoDbRef; # Database + db: AristoTxRef; # Database ): Result[VertexRef, AristoError] = ## For the argument `path`, iterate over the logest possible path in the ## argument database `db`. @@ -124,7 +124,7 @@ iterator stepUp*( proc hikeUp*( path: NibblesBuf; # Partial path root: VertexID; # Start vertex - db: AristoDbRef; # Database + db: AristoTxRef; # Database leaf: Opt[VertexRef]; hike: var Hike; ): Result[void,(VertexID,AristoError)] = @@ -171,7 +171,7 @@ proc hikeUp*( proc hikeUp*( lty: LeafTie; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = @@ -181,7 +181,7 @@ proc hikeUp*( proc hikeUp*( path: openArray[byte]; root: VertexID; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = @@ -191,7 +191,7 @@ proc hikeUp*( proc hikeUp*( path: Hash32; root: VertexID; - db: AristoDbRef; + db: AristoTxRef; leaf: Opt[VertexRef]; hike: var Hike ): Result[void,(VertexID,AristoError)] = diff --git a/nimbus/db/aristo/aristo_init/memory_only.nim b/nimbus/db/aristo/aristo_init/memory_only.nim index 6bff63a391..b55320b42b 100644 --- a/nimbus/db/aristo/aristo_init/memory_only.nim +++ b/nimbus/db/aristo/aristo_init/memory_only.nim @@ -53,11 +53,15 @@ proc init*( ): T = ## Memory backend constructor. ## - when B is VoidBackendRef: - AristoDbRef(top: LayerRef.init()) - elif B is MemBackendRef: - AristoDbRef(top: LayerRef.init(), backend: memoryBackend()) + let db = + when B is VoidBackendRef: + AristoDbRef(txRef: AristoTxRef(layer: LayerRef.init())) + + elif B is MemBackendRef: + AristoDbRef(txRef: AristoTxRef(layer: LayerRef.init()), backend: memoryBackend()) + db.txRef.db = db + db proc init*( T: type AristoDbRef; # Target type diff --git a/nimbus/db/aristo/aristo_init/persistent.nim b/nimbus/db/aristo/aristo_init/persistent.nim index 7934b4425e..e6a6f14a26 100644 --- a/nimbus/db/aristo/aristo_init/persistent.nim +++ b/nimbus/db/aristo/aristo_init/persistent.nim @@ -51,12 +51,16 @@ proc newAristoRdbDbRef( be.closeFn(eradicate = false) return err(rc.error) rc.value - ok((AristoDbRef( - top: LayerRef(vTop: vTop), + db = (AristoDbRef( + txRef: AristoTxRef(layer: LayerRef(vTop: vTop)), backend: be, accLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE), stoLeaves: LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE), - ), oCfs)) + ), oCfs) + + db[0].txRef.db = db[0] # TODO evaluate if this cyclic ref is worth the convenience + + ok(db) # ------------------------------------------------------------------------------ # Public database constuctors, destructor diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim index 742caf3eb2..d9b685f5b4 100644 --- a/nimbus/db/aristo/aristo_layers.nim +++ b/nimbus/db/aristo/aristo_layers.nim @@ -11,10 +11,11 @@ {.push raises: [].} import - std/[enumerate, sequtils, sets, tables], + std/[sets, tables], eth/common/hashes, results, - ./aristo_desc + ./aristo_desc, + ../../utils/mergeutils # ------------------------------------------------------------------------------ # Private functions @@ -25,87 +26,46 @@ func dup(sTab: Table[RootedVertexID,VertexRef]): Table[RootedVertexID,VertexRef] for (k,v) in sTab.pairs: result[k] = v.dup -# ------------------------------------------------------------------------------ -# Public getters: lazy value lookup for read only versions -# ------------------------------------------------------------------------------ - -func vTop*(db: AristoDbRef): VertexID = - db.top.vTop - -# ------------------------------------------------------------------------------ -# Public getters/helpers -# ------------------------------------------------------------------------------ - -func nLayersVtx*(db: AristoDbRef): int = - ## Number of vertex ID/vertex entries on the cache layers. This is an upper - ## bound for the number of effective vertex ID mappings held on the cache - ## layers as there might be duplicate entries for the same vertex ID on - ## different layers. - ## - db.stack.mapIt(it.sTab.len).foldl(a + b, db.top.sTab.len) - -func nLayersKey*(db: AristoDbRef): int = - ## Number of vertex ID/key entries on the cache layers. This is an upper - ## bound for the number of effective vertex ID mappingss held on the cache - ## layers as there might be duplicate entries for the same vertex ID on - ## different layers. - ## - db.stack.mapIt(it.kMap.len).foldl(a + b, db.top.kMap.len) - # ------------------------------------------------------------------------------ # Public functions: getter variants # ------------------------------------------------------------------------------ -func layersGetVtx*(db: AristoDbRef; rvid: RootedVertexID): Opt[(VertexRef, int)] = +func layersGetVtx*(db: AristoTxRef; rvid: RootedVertexID): Opt[(VertexRef, int)] = ## Find a vertex on the cache layers. An `ok()` result might contain a ## `nil` vertex if it is stored on the cache that way. ## - db.top.sTab.withValue(rvid, item): - return Opt.some((item[], 0)) - - for i, w in enumerate(db.rstack): + for w, level in db.rstack: w.sTab.withValue(rvid, item): - return Opt.some((item[], i + 1)) + return Opt.some((item[], level)) Opt.none((VertexRef, int)) -func layersGetKey*(db: AristoDbRef; rvid: RootedVertexID): Opt[(HashKey, int)] = +func layersGetKey*(db: AristoTxRef; rvid: RootedVertexID): Opt[(HashKey, int)] = ## Find a hash key on the cache layers. An `ok()` result might contain a void ## hash key if it is stored on the cache that way. ## - db.top.kMap.withValue(rvid, item): - return Opt.some((item[], 0)) - if rvid in db.top.sTab: - return Opt.some((VOID_HASH_KEY, 0)) - - for i, w in enumerate(db.rstack): + for w, level in db.rstack: w.kMap.withValue(rvid, item): - return ok((item[], i + 1)) + return ok((item[], level)) if rvid in w.sTab: - return Opt.some((VOID_HASH_KEY, i + 1)) + return Opt.some((VOID_HASH_KEY, level)) Opt.none((HashKey, int)) -func layersGetKeyOrVoid*(db: AristoDbRef; rvid: RootedVertexID): HashKey = +func layersGetKeyOrVoid*(db: AristoTxRef; rvid: RootedVertexID): HashKey = ## Simplified version of `layersGetKey()` (db.layersGetKey(rvid).valueOr (VOID_HASH_KEY, 0))[0] -func layersGetAccLeaf*(db: AristoDbRef; accPath: Hash32): Opt[VertexRef] = - db.top.accLeaves.withValue(accPath, item): - return Opt.some(item[]) - - for w in db.rstack: +func layersGetAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] = + for w, _ in db.rstack: w.accLeaves.withValue(accPath, item): return Opt.some(item[]) Opt.none(VertexRef) -func layersGetStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] = - db.top.stoLeaves.withValue(mixPath, item): - return Opt.some(item[]) - - for w in db.rstack: +func layersGetStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] = + for w, _ in db.rstack: w.stoLeaves.withValue(mixPath, item): return Opt.some(item[]) @@ -116,48 +76,47 @@ func layersGetStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] = # ------------------------------------------------------------------------------ func layersPutVtx*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; vtx: VertexRef; ) = ## Store a (potentally empty) vertex on the top layer - db.top.sTab[rvid] = vtx - db.top.kMap.del(rvid) + db.layer.sTab[rvid] = vtx + db.layer.kMap.del(rvid) func layersResVtx*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; ) = ## Shortcut for `db.layersPutVtx(vid, VertexRef(nil))`. It is sort of the ## equivalent of a delete function. db.layersPutVtx(rvid, VertexRef(nil)) - func layersPutKey*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; vtx: VertexRef, key: HashKey; ) = ## Store a (potentally void) hash key on the top layer - db.top.sTab[rvid] = vtx - db.top.kMap[rvid] = key + db.layer.sTab[rvid] = vtx + db.layer.kMap[rvid] = key -func layersResKey*(db: AristoDbRef; rvid: RootedVertexID, vtx: VertexRef) = +func layersResKey*(db: AristoTxRef; rvid: RootedVertexID, vtx: VertexRef) = ## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the ## equivalent of a delete function. db.layersPutVtx(rvid, vtx) -func layersResKeys*(db: AristoDbRef; hike: Hike) = +func layersResKeys*(db: AristoTxRef; hike: Hike) = ## Reset all cached keys along the given hike for i in 1..hike.legs.len: db.layersResKey((hike.root, hike.legs[^i].wp.vid), hike.legs[^i].wp.vtx) -func layersPutAccLeaf*(db: AristoDbRef; accPath: Hash32; leafVtx: VertexRef) = - db.top.accLeaves[accPath] = leafVtx +func layersPutAccLeaf*(db: AristoTxRef; accPath: Hash32; leafVtx: VertexRef) = + db.layer.accLeaves[accPath] = leafVtx -func layersPutStoLeaf*(db: AristoDbRef; mixPath: Hash32; leafVtx: VertexRef) = - db.top.stoLeaves[mixPath] = leafVtx +func layersPutStoLeaf*(db: AristoTxRef; mixPath: Hash32; leafVtx: VertexRef) = + db.layer.stoLeaves[mixPath] = leafVtx # ------------------------------------------------------------------------------ # Public functions @@ -165,64 +124,60 @@ func layersPutStoLeaf*(db: AristoDbRef; mixPath: Hash32; leafVtx: VertexRef) = func isEmpty*(ly: LayerRef): bool = ## Returns `true` if the layer does not contain any changes, i.e. all the - ## tables are empty. The field `txUid` is ignored, here. + ## tables are empty. ly.sTab.len == 0 and ly.kMap.len == 0 and ly.accLeaves.len == 0 and ly.stoLeaves.len == 0 - -func layersMergeOnto*(src: LayerRef; trg: var LayerObj) = - ## Merges the argument `src` into the argument `trg` and returns `trg`. For - ## the result layer, the `txUid` value set to `0`. - ## - trg.txUid = 0 - - for (vid,vtx) in src.sTab.pairs: - trg.sTab[vid] = vtx - trg.kMap.del vid - for (vid,key) in src.kMap.pairs: - trg.kMap[vid] = key +proc mergeAndReset*(trg, src: var Layer) = + ## Merges the argument `src` into the argument `trg` and clears `src`. trg.vTop = src.vTop - for (accPath,leafVtx) in src.accLeaves.pairs: - trg.accLeaves[accPath] = leafVtx - for (mixPath,leafVtx) in src.stoLeaves.pairs: - trg.stoLeaves[mixPath] = leafVtx - -func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = - ## Provide a collapsed copy of layers up to a particular transaction level. - ## If the `level` argument is too large, the maximum transaction level is - ## returned. For the result layer, the `txUid` value set to `0`. - ## - let layers = if db.stack.len <= level: db.stack & @[db.top] - else: db.stack[0 .. level] - - # Set up initial layer (bottom layer) - result = LayerRef( - sTab: layers[0].sTab.dup, # explicit dup for ref values - kMap: layers[0].kMap, - vTop: layers[^1].vTop, - accLeaves: layers[0].accLeaves, - stoLeaves: layers[0].stoLeaves) - - # Consecutively merge other layers on top - for n in 1 ..< layers.len: - for (vid,vtx) in layers[n].sTab.pairs: - result.sTab[vid] = vtx - result.kMap.del vid - for (vid,key) in layers[n].kMap.pairs: - result.kMap[vid] = key - for (accPath,vtx) in layers[n].accLeaves.pairs: - result.accLeaves[accPath] = vtx - for (mixPath,vtx) in layers[n].stoLeaves.pairs: - result.stoLeaves[mixPath] = vtx + + if trg.kMap.len > 0: + # Invalidate cached keys in the lower layer + for vid in src.sTab.keys: + trg.kMap.del vid + + mergeAndReset(trg.sTab, src.sTab) + mergeAndReset(trg.kMap, src.kMap) + mergeAndReset(trg.accLeaves, src.accLeaves) + mergeAndReset(trg.stoLeaves, src.stoLeaves) + +# func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = +# ## Provide a collapsed copy of layers up to a particular transaction level. +# ## If the `level` argument is too large, the maximum transaction level is +# ## returned. +# ## +# let layers = if db.stack.len <= level: db.stack & @[db.top] +# else: db.stack[0 .. level] + +# # Set up initial layer (bottom layer) +# result = LayerRef( +# sTab: layers[0].sTab.dup, # explicit dup for ref values +# kMap: layers[0].kMap, +# vTop: layers[^1].vTop, +# accLeaves: layers[0].accLeaves, +# stoLeaves: layers[0].stoLeaves) + +# # Consecutively merge other layers on top +# for n in 1 ..< layers.len: +# for (vid,vtx) in layers[n].sTab.pairs: +# result.sTab[vid] = vtx +# result.kMap.del vid +# for (vid,key) in layers[n].kMap.pairs: +# result.kMap[vid] = key +# for (accPath,vtx) in layers[n].accLeaves.pairs: +# result.accLeaves[accPath] = vtx +# for (mixPath,vtx) in layers[n].stoLeaves.pairs: +# result.stoLeaves[mixPath] = vtx # ------------------------------------------------------------------------------ # Public iterators # ------------------------------------------------------------------------------ iterator layersWalkVtx*( - db: AristoDbRef; + db: AristoTxRef; seen: var HashSet[VertexID]; ): tuple[rvid: RootedVertexID, vtx: VertexRef] = ## Walk over all `(VertexID,VertexRef)` pairs on the cache layers. Note that @@ -232,18 +187,14 @@ iterator layersWalkVtx*( ## the one with a zero vertex which are othewise skipped by the iterator. ## The `seen` argument must not be modified while the iterator is active. ## - for (rvid,vtx) in db.top.sTab.pairs: - yield (rvid,vtx) - seen.incl rvid.vid - - for w in db.rstack: + for w, _ in db.rstack: for (rvid,vtx) in w.sTab.pairs: if rvid.vid notin seen: yield (rvid,vtx) seen.incl rvid.vid iterator layersWalkVtx*( - db: AristoDbRef; + db: AristoTxRef; ): tuple[rvid: RootedVertexID, vtx: VertexRef] = ## Variant of `layersWalkVtx()`. var seen: HashSet[VertexID] @@ -252,16 +203,12 @@ iterator layersWalkVtx*( iterator layersWalkKey*( - db: AristoDbRef; + db: AristoTxRef; ): tuple[rvid: RootedVertexID, key: HashKey] = ## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that ## entries are unsorted. var seen: HashSet[VertexID] - for (rvid,key) in db.top.kMap.pairs: - yield (rvid,key) - seen.incl rvid.vid - - for w in db.rstack: + for w, _ in db.rstack: for (rvid,key) in w.kMap.pairs: if rvid.vid notin seen: yield (rvid,key) diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim index bafc22c6fe..4f507e4d21 100644 --- a/nimbus/db/aristo/aristo_merge.nim +++ b/nimbus/db/aristo/aristo_merge.nim @@ -32,14 +32,14 @@ import proc layersPutLeaf( - db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload + db: AristoTxRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload ): VertexRef = let vtx = VertexRef(vType: Leaf, pfx: path, lData: payload) db.layersPutVtx(rvid, vtx) vtx proc mergePayloadImpl( - db: AristoDbRef, # Database, top layer + db: AristoTxRef, # Database, top layer root: VertexID, # MPT state root path: Hash32, # Leaf item to add to the database leaf: Opt[VertexRef], @@ -171,7 +171,7 @@ proc mergePayloadImpl( # ------------------------------------------------------------------------------ proc mergeAccountRecord*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Even nibbled byte path accRec: AristoAccount; # Account data ): Result[bool,AristoError] = @@ -201,7 +201,7 @@ proc mergeAccountRecord*( ok true proc mergeStorageData*( - db: AristoDbRef; # Database, top layer + db: AristoTxRef; # Database, top layer accPath: Hash32; # Needed for accounts payload stoPath: Hash32; # Storage data path (aka key) stoData: UInt256; # Storage data payload value diff --git a/nimbus/db/aristo/aristo_nearby.nim b/nimbus/db/aristo/aristo_nearby.nim index 323ead85cc..29ccfcd9f3 100644 --- a/nimbus/db/aristo/aristo_nearby.nim +++ b/nimbus/db/aristo/aristo_nearby.nim @@ -85,7 +85,7 @@ proc toLeafTiePayload(hike: Hike): (LeafTie,LeafPayload) = proc complete( hike: Hike; # Partially expanded chain of vertices vid: VertexID; # Start ID - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) doLeast: static[bool]; # Direction: *least* or *most* ): Result[Hike,(VertexID,AristoError)] = @@ -124,7 +124,7 @@ proc complete( proc zeroAdjust( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer doLeast: static[bool]; # Direction: *least* or *most* ): Result[Hike,(VertexID,AristoError)] = ## Adjust empty argument path to the first vertex entry to the right. Ths @@ -142,7 +142,7 @@ proc zeroAdjust( else: w.branchNibbleMax n - proc toHike(pfx: NibblesBuf, root: VertexID, db: AristoDbRef): Hike = + proc toHike(pfx: NibblesBuf, root: VertexID, db: AristoTxRef): Hike = when doLeast: discard pfx.pathPfxPad(0).hikeUp(root, db, Opt.none(VertexRef), result) else: @@ -197,7 +197,7 @@ proc zeroAdjust( proc finalise( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer moveRight: static[bool]; # Direction of next vertex ): Result[Hike,(VertexID,AristoError)] = ## Handle some pathological cases after main processing failed @@ -251,7 +251,7 @@ proc finalise( proc nearbyNext( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) moveRight: static[bool]; # Direction of next vertex ): Result[Hike,(VertexID,AristoError)] = @@ -343,7 +343,7 @@ proc nearbyNext( proc nearbyNextLeafTie( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer hikeLenMax: static[int]; # Beware of loops (if any) moveRight:static[bool]; # Direction of next vertex ): Result[PathID,(VertexID,AristoError)] = @@ -368,7 +368,7 @@ proc nearbyNextLeafTie( proc right*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[Hike,(VertexID,AristoError)] = ## Extends the maximally extended argument vertices `hike` to the right (i.e. ## with non-decreasing path value). This function does not backtrack if @@ -383,7 +383,7 @@ proc right*( proc right*( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[LeafTie,(VertexID,AristoError)] = ## Variant of `nearbyRight()` working with a `LeafTie` argument instead ## of a `Hike`. @@ -392,7 +392,7 @@ proc right*( path: ? lty.nearbyNextLeafTie(db, 64, moveRight=true)) iterator rightPairs*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = low(LeafTie); # Before or at first value ): (LeafTie,LeafPayload) = ## Traverse the sub-trie implied by the argument `start` with increasing @@ -432,7 +432,7 @@ iterator rightPairs*( # End while iterator rightPairsAccount*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = low(PathID); # Before or at first value ): (PathID,AristoAccount) = ## Variant of `rightPairs()` for accounts tree @@ -440,7 +440,7 @@ iterator rightPairsAccount*( yield (lty.path, pyl.account) iterator rightPairsStorage*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer accPath: Hash32; # Account the storage data belong to start = low(PathID); # Before or at first value ): (PathID,UInt256) = @@ -456,7 +456,7 @@ iterator rightPairsStorage*( proc left*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[Hike,(VertexID,AristoError)] = ## Similar to `nearbyRight()`. ## @@ -466,7 +466,7 @@ proc left*( proc left*( lty: LeafTie; # Some `Patricia Trie` path - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[LeafTie,(VertexID,AristoError)] = ## Similar to `nearbyRight()` for `LeafTie` argument instead of a `Hike`. ok LeafTie( @@ -474,7 +474,7 @@ proc left*( path: ? lty.nearbyNextLeafTie(db, 64, moveRight=false)) iterator leftPairs*( - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer start = high(LeafTie); # Before or at first value ): (LeafTie,LeafPayload) = ## Traverse the sub-trie implied by the argument `start` with decreasing @@ -523,7 +523,7 @@ iterator leftPairs*( proc rightMissing*( hike: Hike; # Partially expanded chain of vertices - db: AristoDbRef; # Database layer + db: AristoTxRef; # Database layer ): Result[bool,AristoError] = ## Returns `true` if the maximally extended argument vertex `hike` is the ## right most on the hexary trie database. It verifies that there is no more diff --git a/nimbus/db/aristo/aristo_part.nim b/nimbus/db/aristo/aristo_part.nim index 17ecedea0c..1218c0ae90 100644 --- a/nimbus/db/aristo/aristo_part.nim +++ b/nimbus/db/aristo/aristo_part.nim @@ -67,7 +67,7 @@ iterator vkPairs*(ps: PartStateRef): (RootedVertexID, HashKey) = # ------------------------------------------------------------------------------ proc partTwig( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; path: NibblesBuf; ): Result[(seq[seq[byte]],bool), AristoError] = @@ -88,13 +88,13 @@ proc partTwig( err(rc.error) proc partAccountTwig*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] = db.partTwig(VertexID(1), NibblesBuf.fromBytes accPath.data) proc partStorageTwig*( - db: AristoDbRef; + db: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[(seq[seq[byte]],bool), AristoError] = @@ -113,9 +113,7 @@ proc partUntwigPath*( root: Hash32; path: Hash32; ): Result[Opt[seq[byte]],AristoError] = - ## Verify the chain of rlp-encoded nodes and return the payload. If a - ## `Opt.none()` result is returned then the `path` argument does provably - ## not exist relative to `chain`. + ## Variant of `partUntwigGeneric()`. try: let nibbles = NibblesBuf.fromBytes path.data diff --git a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim index c0b70571d2..ff62add735 100644 --- a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim +++ b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim @@ -30,7 +30,7 @@ const # ------------------------------------------------------------------------------ proc chainRlpNodes*( - db: AristoDbRef; + db: AristoTxRef; rvid: RootedVertexID; path: NibblesBuf, chain: var seq[seq[byte]]; diff --git a/nimbus/db/aristo/aristo_part/part_debug.nim b/nimbus/db/aristo/aristo_part/part_debug.nim index 852b149447..d42dec02e9 100644 --- a/nimbus/db/aristo/aristo_part/part_debug.nim +++ b/nimbus/db/aristo/aristo_part/part_debug.nim @@ -76,7 +76,7 @@ proc pp*[T: PrfNode|PrfExtension]( t0: Table[RootedVertexID,(HashKey,T)] t1: Table[HashKey,T] for (key,val) in t.pairs: - ps.db.xMap.withValue(key,rv): + ps.db.db.xMap.withValue(key,rv): t0[rv[]] = (key,val) do: t1[key] = val diff --git a/nimbus/db/aristo/aristo_part/part_desc.nim b/nimbus/db/aristo/aristo_part/part_desc.nim index bdd2d4395d..7ee0305a84 100644 --- a/nimbus/db/aristo/aristo_part/part_desc.nim +++ b/nimbus/db/aristo/aristo_part/part_desc.nim @@ -17,7 +17,7 @@ import type PartStateRef* = ref object of RootRef - db*: AristoDbRef + db*: AristoTxRef core*: Table[VertexID,HashSet[HashKey]] # Existing vertices pureExt*: Table[HashKey,PrfExtension] # On-demand node (usually hidden) byKey*: Table[HashKey,RootedVertexID] # All keys, instead of `kMap[]` @@ -69,7 +69,7 @@ type # Public helpers # ------------------------------------------------------------------------------ -proc init*(T: type PartStateRef; db: AristoDbRef): T = +proc init*(T: type PartStateRef; db: AristoTxRef): T = ## Constructor for a partial database. T(db: db) diff --git a/nimbus/db/aristo/aristo_serialise.nim b/nimbus/db/aristo/aristo_serialise.nim index 0092b7ea55..d013ebb803 100644 --- a/nimbus/db/aristo/aristo_serialise.nim +++ b/nimbus/db/aristo/aristo_serialise.nim @@ -134,7 +134,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T = wr.finish().digestTo(HashKey) proc serialise*( - db: AristoDbRef; + db: AristoTxRef; root: VertexID; pyl: LeafPayload; ): Result[seq[byte],(VertexID,AristoError)] = diff --git a/nimbus/db/aristo/aristo_tx/tx_frame.nim b/nimbus/db/aristo/aristo_tx/tx_frame.nim index 17610db02e..a67059ccc0 100644 --- a/nimbus/db/aristo/aristo_tx/tx_frame.nim +++ b/nimbus/db/aristo/aristo_tx/tx_frame.nim @@ -17,52 +17,11 @@ import results, ".."/[aristo_desc, aristo_layers] -func isTop*(tx: AristoTxRef): bool - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] = - if not tx.isTop(): - return err(TxNotTopTx) - let db = tx.db - if tx.level != db.stack.len: - return err(TxStackGarbled) - ok db - -proc getTxUid(db: AristoDbRef): uint = - if db.txUidGen == high(uint): - db.txUidGen = 0 - db.txUidGen.inc - db.txUidGen - -# ------------------------------------------------------------------------------ -# Public functions, getters -# ------------------------------------------------------------------------------ - -func txFrameTop*(db: AristoDbRef): Result[AristoTxRef,AristoError] = - ## Getter, returns top level transaction if there is any. - if db.txRef.isNil: - err(TxNoPendingTx) - else: - ok(db.txRef) - -func isTop*(tx: AristoTxRef): bool = - ## Getter, returns `true` if the argument `tx` referes to the current top - ## level transaction. - tx.db.txRef == tx and tx.db.top.txUid == tx.txUid - -func txFrameLevel*(db: AristoDbRef): int = - ## Getter, non-negative nesting level (i.e. number of pending transactions) - if not db.txRef.isNil: - result = db.txRef.level - # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ -proc txFrameBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] = +proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): Result[AristoTxRef,AristoError] = ## Starts a new transaction. ## ## Example: @@ -73,23 +32,24 @@ proc txFrameBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] = ## ... continue using db ... ## tx.commit() ## - if db.txFrameLevel != db.stack.len: - return err(TxStackGarbled) - let vTop = db.top.vTop - db.stack.add db.top - db.top = LayerRef( - vTop: vTop, - txUid: db.getTxUid) + let parent = if parent == nil: + db.txRef + else: + parent - db.txRef = AristoTxRef( - db: db, - txUid: db.top.txUid, - parent: db.txRef, - level: db.stack.len) + let + vTop = parent.layer.vTop + layer = LayerRef( + vTop: vTop) - ok db.txRef + ok AristoTxRef( + db: db, + parent: parent, + layer: layer) +proc baseTxFrame*(db: AristoDbRef): AristoTxRef= + db.txRef proc rollback*( tx: AristoTxRef; # Top transaction on database @@ -97,41 +57,20 @@ proc rollback*( ## Given a *top level* handle, this function discards all database operations ## performed for this transactio. The previous transaction is returned if ## there was any. - ## - let db = ? tx.getDbDescFromTopTx() - - # Roll back to previous layer. - db.top = db.stack[^1] - db.stack.setLen(db.stack.len-1) - - db.txRef = db.txRef.parent + # TODO Everyone using this txref should repoint their parent field + tx.layer = LayerRef() ok() proc commit*( tx: AristoTxRef; # Top transaction on database ): Result[void,AristoError] = - ## Given a *top level* handle, this function accepts all database operations - ## performed through this handle and merges it to the previous layer. The - ## previous transaction is returned if there was any. + ## This function pushes all changes done in this frame to its parent ## - let db = ? tx.getDbDescFromTopTx() - - # Pop layer from stack and merge database top layer onto it - let merged = db.stack.pop() - if not merged.isEmpty(): - # No need to update top if we popped an empty layer - if not db.top.isEmpty(): - # Only call `layersMergeOnto()` if layer is empty - db.top.layersMergeOnto merged[] - - # Install `merged` stack top layer and update stack - db.top = merged - - db.txRef = tx.parent - if 0 < db.stack.len: - db.txRef.txUid = db.getTxUid - db.top.txUid = db.txRef.txUid + # TODO Everyone using this txref should repoint their parent field + doAssert tx.parent != nil, "should not commit the base tx" + mergeAndReset(tx.parent.layer[], tx.layer[]) + ok() @@ -147,11 +86,11 @@ proc collapse*( ## if db.txFrameTop.isErr: break ## tx = db.txFrameTop.value ## - let db = ? tx.getDbDescFromTopTx() + # let db = ? tx.getDbDescFromTopTx() - db.top.txUid = 0 - db.stack.setLen(0) - db.txRef = AristoTxRef(nil) + # db.top.txUid = 0 + # db.stack.setLen(0) + # db.txRef = AristoTxRef(nil) ok() # ------------------------------------------------------------------------------ @@ -161,31 +100,31 @@ proc collapse*( iterator walk*(tx: AristoTxRef): (int,AristoTxRef,LayerRef,AristoError) = ## Walk down the transaction stack chain. let db = tx.db - var tx = tx - - block body: - # Start at top layer if tx refers to that - if tx.level == db.stack.len: - if tx.txUid != db.top.txUid: - yield (-1,tx,db.top,TxStackGarbled) - break body - - # Yield the top level - yield (0,tx,db.top,AristoError(0)) - - # Walk down the transaction stack - for level in (tx.level-1).countdown(1): - tx = tx.parent - if tx.isNil or tx.level != level: - yield (-1,tx,LayerRef(nil),TxStackGarbled) - break body - - var layer = db.stack[level] - if tx.txUid != layer.txUid: - yield (-1,tx,layer,TxStackGarbled) - break body - - yield (db.stack.len-level,tx,layer,AristoError(0)) + # var tx = tx + + # block body: + # # Start at top layer if tx refers to that + # if tx.level == db.stack.len: + # if tx.txUid != db.top.txUid: + # yield (-1,tx,db.top,TxStackGarbled) + # break body + + # # Yield the top level + # yield (0,tx,db.top,AristoError(0)) + + # # Walk down the transaction stack + # for level in (tx.level-1).countdown(1): + # tx = tx.parent + # if tx.isNil or tx.level != level: + # yield (-1,tx,LayerRef(nil),TxStackGarbled) + # break body + + # var layer = db.stack[level] + # if tx.txUid != layer.txUid: + # yield (-1,tx,layer,TxStackGarbled) + # break body + + # yield (db.stack.len-level,tx,layer,AristoError(0)) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_tx/tx_stow.nim b/nimbus/db/aristo/aristo_tx/tx_stow.nim index b141e2bda2..c415b8b19a 100644 --- a/nimbus/db/aristo/aristo_tx/tx_stow.nim +++ b/nimbus/db/aristo/aristo_tx/tx_stow.nim @@ -15,8 +15,7 @@ import results, - ../aristo_delta/delta_merge, - ".."/[aristo_desc, aristo_delta, aristo_layers] + ../[aristo_desc, aristo_delta] # ------------------------------------------------------------------------------ # Private functions @@ -25,10 +24,6 @@ import proc txPersistOk*( db: AristoDbRef; # Database ): Result[void,AristoError] = - if not db.txRef.isNil: - return err(TxPendingTx) - if 0 < db.stack.len: - return err(TxStackGarbled) if not db.deltaPersistentOk(): return err(TxBackendNotWritable) ok() @@ -45,17 +40,7 @@ proc txPersist*( ## ? db.txPersistOk() - if not db.top.isEmpty(): - # Note that `deltaMerge()` will return the `db.top` argument if the - # `db.balancer` is `nil`. Also, the `db.balancer` is read-only. In the - # case that there are no forked peers one can ignore that restriction as - # no balancer is shared. - db.balancer = deltaMerge(db.top, db.balancer) - - # New empty top layer - db.top = LayerRef(vTop: db.balancer.vTop) - - # Merge/move `balancer` into persistent tables (unless missing) + # Merge/move `txRef` into persistent tables (unless missing) ? db.deltaPersistent nxtSid ok() diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim index 912175aa91..3aa4c6cef3 100644 --- a/nimbus/db/aristo/aristo_utils.nim +++ b/nimbus/db/aristo/aristo_utils.nim @@ -24,7 +24,7 @@ import proc toNode*( vtx: VertexRef; # Vertex to convert root: VertexID; # Sub-tree root the `vtx` belongs to - db: AristoDbRef; # Database + db: AristoTxRef; # Database ): Result[NodeRef,seq[VertexID]] = ## Convert argument the vertex `vtx` to a node type. Missing Merkle hash ## keys are searched for on the argument database `db`. diff --git a/nimbus/db/aristo/aristo_vid.nim b/nimbus/db/aristo/aristo_vid.nim index e8afb5691a..74186ec03c 100644 --- a/nimbus/db/aristo/aristo_vid.nim +++ b/nimbus/db/aristo/aristo_vid.nim @@ -20,14 +20,14 @@ import # Public functions # ------------------------------------------------------------------------------ -proc vidFetch*(db: AristoDbRef, n = 1): VertexID = +proc vidFetch*(db: AristoTxRef, n = 1): VertexID = ## Fetch next vertex ID. ## - if db.top.vTop == 0: - db.top.vTop = VertexID(LEAST_FREE_VID) - var ret = db.top.vTop + if db.layer.vTop == 0: + db.layer.vTop = VertexID(LEAST_FREE_VID) + var ret = db.layer.vTop ret.inc - db.top.vTop.inc(n) + db.layer.vTop.inc(n) ret # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_walk/persistent.nim b/nimbus/db/aristo/aristo_walk/persistent.nim index 5c6d79fcca..bf2dc11983 100644 --- a/nimbus/db/aristo/aristo_walk/persistent.nim +++ b/nimbus/db/aristo/aristo_walk/persistent.nim @@ -36,7 +36,7 @@ iterator walkVtxBe*[T: RdbBackendRef]( db: AristoDbRef; kinds = {Branch, Leaf}; ): tuple[rvid: RootedVertexID, vtx: VertexRef] = - ## Iterate over filtered RocksDB backend vertices. This function depends on + ## Iterate over RocksDB backend vertices. This function depends on ## the particular backend type name which must match the backend descriptor. for (rvid,vtx) in walkVtxBeImpl[T](db, kinds): yield (rvid,vtx) diff --git a/nimbus/db/aristo/aristo_walk/walk_private.nim b/nimbus/db/aristo/aristo_walk/walk_private.nim index 6933da8077..c961b55189 100644 --- a/nimbus/db/aristo/aristo_walk/walk_private.nim +++ b/nimbus/db/aristo/aristo_walk/walk_private.nim @@ -24,14 +24,14 @@ iterator walkVtxBeImpl*[T]( ): tuple[rvid: RootedVertexID, vtx: VertexRef] = ## Generic iterator when T is VoidBackendRef: - let filter = if db.balancer.isNil: LayerRef() else: db.balancer + let filter = if db.txRef.isNil: LayerRef() else: db.txRef.layer else: mixin walkVtx let filter = LayerRef() - if not db.balancer.isNil: - filter.sTab = db.balancer.sTab # copy table + if not db.txRef.isNil: + filter.sTab = db.txRef.layer.sTab # copy table for (rvid,vtx) in db.backend.T.walkVtx(kinds): if filter.sTab.hasKey rvid: @@ -55,14 +55,14 @@ iterator walkKeyBeImpl*[T]( ): tuple[rvid: RootedVertexID, key: HashKey] = ## Generic iterator when T is VoidBackendRef: - let filter = if db.balancer.isNil: LayerRef() else: db.balancer + let filter = if db.txRef.isNil: LayerRef() else: db.txRef.layer else: mixin walkKey let filter = LayerRef() - if not db.balancer.isNil: - filter.kMap = db.balancer.kMap # copy table + if not db.txRef.isNil: + filter.kMap = db.txRef.layer.kMap # copy table for (rvid,key) in db.backend.T.walkKey: if filter.kMap.hasKey rvid: diff --git a/nimbus/db/core_db/backend/aristo_rocksdb.nim b/nimbus/db/core_db/backend/aristo_rocksdb.nim index 7ad8912538..701e6c5d41 100644 --- a/nimbus/db/core_db/backend/aristo_rocksdb.nim +++ b/nimbus/db/core_db/backend/aristo_rocksdb.nim @@ -167,7 +167,7 @@ proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef = if opts.rdbKeyCacheSize > 0: # Make sure key cache isn't empty - adb.computeKeys(VertexID(1)).isOkOr: + adb.txRef.computeKeys(VertexID(1)).isOkOr: fatal "Cannot compute root keys", msg=error quit(QuitFailure) diff --git a/nimbus/db/core_db/backend/aristo_trace.nim b/nimbus/db/core_db/backend/aristo_trace.nim index ad3bb9a954..72a37b9278 100644 --- a/nimbus/db/core_db/backend/aristo_trace.nim +++ b/nimbus/db/core_db/backend/aristo_trace.nim @@ -349,7 +349,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = # Update production api tracerApi.get = - proc(kvt: KvtDbRef; key: openArray[byte]): Result[seq[byte],KvtError] = + proc(kvt: KvtTxRef; key: openArray[byte]): Result[seq[byte],KvtError] = const info = KvtApiProfGetFn when CoreDbNoisyCaptJournal: @@ -368,7 +368,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = ok(data) tracerApi.del = - proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] = + proc(kvt: KvtTxRef; key: openArray[byte]): Result[void,KvtError] = const info = KvtApiProfDelFn when CoreDbNoisyCaptJournal: @@ -402,7 +402,7 @@ proc kvtTraceRecorder(tr: TraceRecorderRef) = ok() tracerApi.put = - proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] = + proc(kvt: KvtTxRef; key, data: openArray[byte]): Result[void,KvtError] = const info = KvtApiProfPutFn when CoreDbNoisyCaptJournal: @@ -445,7 +445,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = tr.db.ariApi = tracerApi tracerApi.fetchAccountRecord = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[AristoAccount,AristoError] = const info = AristoApiProfFetchAccountRecordFn @@ -467,7 +467,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok accRec tracerApi.fetchStateRoot = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; ): Result[Hash32,AristoError] = const info = AristoApiProfFetchStateRootFn @@ -488,7 +488,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok state tracerApi.fetchStorageData = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[UInt256,AristoError] = @@ -511,7 +511,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok stoData tracerApi.fetchStorageRoot = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[Hash32,AristoError] = const info = AristoApiProfFetchStorageRootFn @@ -533,7 +533,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok state tracerApi.deleteAccountRecord = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] = const info = AristoApiProfDeleteAccountRecordFn @@ -569,7 +569,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok() tracerApi.deleteStorageData = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; stoPath: Hash32; ): Result[bool,AristoError] = @@ -605,7 +605,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok emptyTrie tracerApi.deleteStorageTree = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; ): Result[void,AristoError] = const info = AristoApiProfDeleteStorageTreeFn @@ -628,7 +628,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok() tracerApi.mergeAccountRecord = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; accRec: AristoAccount; ): Result[bool,AristoError] = @@ -661,7 +661,7 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = ok updated tracerApi.mergeStorageData = - proc(mpt: AristoDbRef; + proc(mpt: AristoTxRef; accPath: Hash32; stoPath: Hash32; stoData: UInt256; diff --git a/nimbus/db/core_db/base.nim b/nimbus/db/core_db/base.nim index 8c91b9aedf..f253333004 100644 --- a/nimbus/db/core_db/base.nim +++ b/nimbus/db/core_db/base.nim @@ -68,6 +68,18 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef = ## db.defCtx +proc baseTxFrame*(db: CoreDbRef): CoreDbTxRef = + ## The base tx frame is a staging are for reading and writing "almost" + ## directly from/to the database without using any pending frames - when a + ## transaction created using `beginTxFrame` is committed, it ultimately ends + ## up in the base txframe before being persisted to the database with a + ## persist call. + + CoreDbTxRef( + ctx: db.ctx, + aTx: db.ctx.parent.ariApi.call(baseTxFrame, db.ctx.mpt), + kTx: db.ctx.parent.kvtApi.call(baseTxFrame, db.ctx.kvt)) + # ------------------------------------------------------------------------------ # Public base descriptor methods # ------------------------------------------------------------------------------ @@ -102,17 +114,14 @@ proc persistent*( ## db.setTrackNewApi BasePersistentFn block body: - block: - let rc = CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt) - if rc.isOk or rc.error == TxPersistDelayed: - # The latter clause is OK: Piggybacking on `Aristo` backend - discard - elif CoreDbKvtRef(db.ctx).call(txFrameLevel, db.ctx.kvt) != 0: - result = err(rc.error.toError($api, TxPending)) - break body - else: - result = err(rc.error.toError $api) - break body + let rc = CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt) + if rc.isOk or rc.error == TxPersistDelayed: + # The latter clause is OK: Piggybacking on `Aristo` backend + discard + else: + result = err(rc.error.toError $api) + break body + # Having reached here `Aristo` must not fail as both `Kvt` and `Aristo` # are kept in sync. So if there is a legit fail condition it mist be # caught in the previous clause. @@ -121,13 +130,13 @@ proc persistent*( result = ok() db.ifTrackNewApi: debug logTxt, api, elapsed, blockNumber, result -proc stateBlockNumber*(db: CoreDbRef): BlockNumber = - ## Rhis function returns the block number stored with the latest `persist()` +proc stateBlockNumber*(db: CoreDbTxRef): BlockNumber = + ## This function returns the block number stored with the latest `persist()` ## directive. ## db.setTrackNewApi BaseStateBlockNumberFn result = block: - let rc = CoreDbAccRef(db.ctx).call(fetchLastSavedState, db.ctx.mpt) + let rc = db.ctx.parent.ariApi.call(fetchLastSavedState, db.aTx) if rc.isOk: rc.value.serial.BlockNumber else: @@ -169,11 +178,11 @@ proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef = # ----------- KVT --------------- -proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = +proc get*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] = ## This function always returns a non-empty `seq[byte]` or an error code. kvt.setTrackNewApi KvtGetFn result = block: - let rc = kvt.call(get, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -182,13 +191,13 @@ proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = +proc getOrEmpty*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] = ## Variant of `get()` returning an empty `seq[byte]` if the key is not found ## on the database. ## kvt.setTrackNewApi KvtGetOrEmptyFn result = block: - let rc = kvt.call(get, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -197,11 +206,11 @@ proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[seq[byte]] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] = +proc len*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[int] = ## This function returns the size of the value associated with `key`. kvt.setTrackNewApi KvtLenFn result = block: - let rc = kvt.call(len, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(len, kvt.kTx, key) if rc.isOk: ok(rc.value) elif rc.error == GetNotFound: @@ -210,10 +219,10 @@ proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] = err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] = +proc del*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[void] = kvt.setTrackNewApi KvtDelFn result = block: - let rc = kvt.call(del, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(del, kvt.kTx, key) if rc.isOk: ok() else: @@ -221,13 +230,13 @@ proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] = kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result proc put*( - kvt: CoreDbKvtRef; + kvt: CoreDbTxRef; key: openArray[byte]; val: openArray[byte]; ): CoreDbRc[void] = kvt.setTrackNewApi KvtPutFn result = block: - let rc = kvt.call(put, kvt.kvt, key, val) + let rc = kvt.ctx.parent.kvtApi.call(put, kvt.kTx, key, val) if rc.isOk: ok() else: @@ -235,21 +244,21 @@ proc put*( kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result -proc hasKeyRc*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[bool] = +proc hasKeyRc*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[bool] = ## For the argument `key` return `true` if `get()` returned a value on ## that argument, `false` if it returned `GetNotFound`, and an error ## otherwise. ## kvt.setTrackNewApi KvtHasKeyRcFn result = block: - let rc = kvt.call(hasKeyRc, kvt.kvt, key) + let rc = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key) if rc.isOk: ok(rc.value) else: err(rc.error.toError $api) kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = +proc hasKey*(kvt: CoreDbTxRef; key: openArray[byte]): bool = ## Simplified version of `hasKeyRc` where `false` is returned instead of ## an error. ## @@ -257,7 +266,7 @@ proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = ## `Tables`. ## kvt.setTrackNewApi KvtHasKeyFn - result = kvt.call(hasKeyRc, kvt.kvt, key).valueOr: false + result = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key).valueOr: false kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result # ------------------------------------------------------------------------------ @@ -274,7 +283,7 @@ proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef = # ----------- accounts --------------- proc proof*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[(seq[seq[byte]],bool)] = ## On the accounts MPT, collect the nodes along the `accPath` interpreted as @@ -285,7 +294,7 @@ proc proof*( ## acc.setTrackNewApi AccProofFn result = block: - let rc = acc.call(partAccountTwig, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(partAccountTwig, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -293,7 +302,7 @@ proc proof*( acc.ifTrackNewApi: debug logTxt, api, elapsed, result proc fetch*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[CoreDbAccount] = ## Fetch the account data record for the particular account indexed by @@ -301,7 +310,7 @@ proc fetch*( ## acc.setTrackNewApi AccFetchFn result = block: - let rc = acc.call(fetchAccountRecord, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchAccountRecord, acc.aTx, accPath) if rc.isOk: ok(rc.value) elif rc.error == FetchPathNotFound: @@ -311,7 +320,7 @@ proc fetch*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result proc delete*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[void] = ## Delete the particular account indexed by the key `accPath`. This @@ -319,7 +328,7 @@ proc delete*( ## acc.setTrackNewApi AccDeleteFn result = block: - let rc = acc.call(deleteAccountRecord, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(deleteAccountRecord, acc.aTx, accPath) if rc.isOk: ok() elif rc.error == DelPathNotFound: @@ -331,7 +340,7 @@ proc delete*( debug logTxt, api, elapsed, accPath=($$accPath), result proc clearStorage*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[void] = ## Delete all data slots from the storage area associated with the @@ -339,7 +348,7 @@ proc clearStorage*( ## acc.setTrackNewApi AccClearStorageFn result = block: - let rc = acc.call(deleteStorageTree, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(deleteStorageTree, acc.aTx, accPath) if rc.isOk or rc.error in {DelStoRootMissing,DelStoAccMissing}: ok() else: @@ -348,7 +357,7 @@ proc clearStorage*( debug logTxt, api, elapsed, accPath=($$accPath), result proc merge*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; accRec: CoreDbAccount; ): CoreDbRc[void] = @@ -357,7 +366,7 @@ proc merge*( ## acc.setTrackNewApi AccMergeFn result = block: - let rc = acc.call(mergeAccountRecord, acc.mpt, accPath, accRec) + let rc = acc.ctx.parent.ariApi.call(mergeAccountRecord, acc.aTx, accPath, accRec) if rc.isOk: ok() else: @@ -366,14 +375,14 @@ proc merge*( debug logTxt, api, elapsed, accPath=($$accPath), result proc hasPath*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[bool] = ## Would be named `contains` if it returned `bool` rather than `Result[]`. ## acc.setTrackNewApi AccHasPathFn result = block: - let rc = acc.call(hasPathAccount, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasPathAccount, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -381,12 +390,12 @@ proc hasPath*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result -proc getStateRoot*(acc: CoreDbAccRef): CoreDbRc[Hash32] = +proc getStateRoot*(acc: CoreDbTxRef): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the accounts ## column (if available.) acc.setTrackNewApi AccStateFn result = block: - let rc = acc.call(fetchStateRoot, acc.mpt) + let rc = acc.ctx.parent.ariApi.call(fetchStateRoot, acc.aTx) if rc.isOk: ok(rc.value) else: @@ -396,7 +405,7 @@ proc getStateRoot*(acc: CoreDbAccRef): CoreDbRc[Hash32] = # ------------ storage --------------- proc slotProof*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[(seq[seq[byte]],bool)] = @@ -412,7 +421,7 @@ proc slotProof*( ## acc.setTrackNewApi AccSlotProofFn result = block: - let rc = acc.call(partStorageTwig, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(partStorageTwig, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) else: @@ -420,14 +429,14 @@ proc slotProof*( acc.ifTrackNewApi: debug logTxt, api, elapsed, result proc slotFetch*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[UInt256] = ## Like `fetch()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotFetchFn result = block: - let rc = acc.call(fetchStorageData, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageData, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) elif rc.error == FetchPathNotFound: @@ -439,14 +448,14 @@ proc slotFetch*( stoPath=($$stoPath), result proc slotDelete*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[void] = ## Like `delete()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotDeleteFn result = block: - let rc = acc.call(deleteStorageData, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(deleteStorageData, acc.aTx, accPath, stoPath) if rc.isOk or rc.error == DelStoRootMissing: # The second `if` clause is insane but legit: A storage column was # announced for an account but no data have been added, yet. @@ -460,14 +469,14 @@ proc slotDelete*( stoPath=($$stoPath), result proc slotHasPath*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; ): CoreDbRc[bool] = ## Like `hasPath()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotHasPathFn result = block: - let rc = acc.call(hasPathStorage, acc.mpt, accPath, stoPath) + let rc = acc.ctx.parent.ariApi.call(hasPathStorage, acc.aTx, accPath, stoPath) if rc.isOk: ok(rc.value) else: @@ -477,7 +486,7 @@ proc slotHasPath*( stoPath=($$stoPath), result proc slotMerge*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; stoPath: Hash32; stoData: UInt256; @@ -485,7 +494,7 @@ proc slotMerge*( ## Like `merge()` but with cascaded index `(accPath,slot)`. acc.setTrackNewApi AccSlotMergeFn result = block: - let rc = acc.call(mergeStorageData, acc.mpt, accPath, stoPath, stoData) + let rc = acc.ctx.parent.ariApi.call(mergeStorageData, acc.aTx, accPath, stoPath, stoData) if rc.isOk: ok() else: @@ -495,7 +504,7 @@ proc slotMerge*( stoPath=($$stoPath), stoData, result proc slotStorageRoot*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the storage data @@ -504,7 +513,7 @@ proc slotStorageRoot*( ## acc.setTrackNewApi AccSlotStorageRootFn result = block: - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath) if rc.isOk: ok(rc.value) else: @@ -513,7 +522,7 @@ proc slotStorageRoot*( debug logTxt, api, elapsed, accPath=($$accPath), result proc slotStorageEmpty*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): CoreDbRc[bool] = ## This function returns `true` if the storage data column is empty or @@ -521,7 +530,7 @@ proc slotStorageEmpty*( ## acc.setTrackNewApi AccSlotStorageEmptyFn result = block: - let rc = acc.call(hasStorageData, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath) if rc.isOk: ok(not rc.value) else: @@ -530,13 +539,13 @@ proc slotStorageEmpty*( debug logTxt, api, elapsed, accPath=($$accPath), result proc slotStorageEmptyOrVoid*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; ): bool = ## Convenience wrapper, returns `true` where `slotStorageEmpty()` would fail. acc.setTrackNewApi AccSlotStorageEmptyOrVoidFn result = block: - let rc = acc.call(hasStorageData, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath) if rc.isOk: not rc.value else: @@ -547,7 +556,7 @@ proc slotStorageEmptyOrVoid*( # ------------- other ---------------- proc recast*( - acc: CoreDbAccRef; + acc: CoreDbTxRef; accPath: Hash32; accRec: CoreDbAccount; ): CoreDbRc[Account] = @@ -556,7 +565,7 @@ proc recast*( ## hash (see `slotStorageRoot()` above) is currently unavailable. ## acc.setTrackNewApi AccRecastFn - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) + let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath) result = block: if rc.isOk: ok Account( @@ -574,21 +583,14 @@ proc recast*( # Public transaction related methods # ------------------------------------------------------------------------------ -proc txFrameLevel*(db: CoreDbRef): int = - ## Retrieve transaction level (zero if there is no pending transaction). - ## - db.setTrackNewApi BaseLevelFn - result = CoreDbAccRef(db.ctx).call(txFrameLevel, db.ctx.mpt) - db.ifTrackNewApi: debug logTxt, api, elapsed, result - -proc txFrameBegin*(ctx: CoreDbCtxRef): CoreDbTxRef = +proc txFrameBegin*(ctx: CoreDbCtxRef, parent: CoreDbTxRef): CoreDbTxRef = ## Constructor ## ctx.setTrackNewApi BaseNewTxFn let - kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt).valueOr: + kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt, if parent != nil: parent.kTx else: nil).valueOr: raiseAssert $api & ": " & $error - aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt).valueOr: + aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt, if parent != nil: parent.aTx else: nil).valueOr: raiseAssert $api & ": " & $error result = ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx) ctx.ifTrackNewApi: @@ -616,12 +618,12 @@ proc rollback*(tx: CoreDbTxRef) = proc dispose*(tx: CoreDbTxRef) = tx.setTrackNewApi TxDisposeFn: let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx) - if CoreDbAccRef(tx.ctx).call(isTop, tx.aTx): - CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr: - raiseAssert $api & ": " & $error - if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx): - CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr: - raiseAssert $api & ": " & $error + # if CoreDbAccRef(tx.ctx).call(isTop, tx.aTx): + CoreDbAccRef(tx.ctx).call(rollback, tx.aTx).isOkOr: + raiseAssert $api & ": " & $error + # if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx): + CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr: + raiseAssert $api & ": " & $error tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/base/api_tracking.nim b/nimbus/db/core_db/base/api_tracking.nim index 5ebac1c885..cc3cd1b902 100644 --- a/nimbus/db/core_db/base/api_tracking.nim +++ b/nimbus/db/core_db/base/api_tracking.nim @@ -76,7 +76,6 @@ type TxCommitFn = "commit" TxDisposeFn = "dispose" - TxFrameLevelFn = "level" TxRollbackFn = "rollback" TxSaveDisposeFn = "safeDispose" diff --git a/nimbus/db/core_db/base/base_helpers.nim b/nimbus/db/core_db/base/base_helpers.nim index 01b12e8e62..46d43159e7 100644 --- a/nimbus/db/core_db/base/base_helpers.nim +++ b/nimbus/db/core_db/base/base_helpers.nim @@ -53,6 +53,9 @@ proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbTxRef): auto = template kvt*(dsc: CoreDbKvtRef): KvtDbRef = CoreDbCtxRef(dsc).kvt +template kvt*(tx: CoreDbTxRef): KvtDbRef = + tx.ctx.kvt + template ctx*(kvt: CoreDbKvtRef): CoreDbCtxRef = CoreDbCtxRef(kvt) diff --git a/nimbus/db/core_db/base_iterators.nim b/nimbus/db/core_db/base_iterators.nim index e896f15b3c..1289ef87ae 100644 --- a/nimbus/db/core_db/base_iterators.nim +++ b/nimbus/db/core_db/base_iterators.nim @@ -34,9 +34,6 @@ when CoreDbEnableApiTracking: const logTxt = "API" -template dbType(dsc: CoreDbKvtRef | CoreDbAccRef): CoreDbType = - dsc.distinctBase.parent.dbType - # --------------- template call(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped = @@ -50,9 +47,6 @@ template call(kvt: CoreDbKvtRef; fn: untyped; args: varargs[untyped]): untyped = # --------------- -template mpt(dsc: CoreDbAccRef): AristoDbRef = - dsc.distinctBase.mpt - template call(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) @@ -70,14 +64,10 @@ template call( # Public iterators # ------------------------------------------------------------------------------ -iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash32): (seq[byte], UInt256) = +iterator slotPairs*(acc: CoreDbTxRef; accPath: Hash32): (seq[byte], UInt256) = acc.setTrackNewApi AccSlotPairsIt - case acc.dbType: - of AristoDbMemory, AristoDbRocks, AristoDbVoid: - for (path,data) in acc.mpt.rightPairsStorage accPath: - yield (acc.call(pathAsBlob, path), data) - of Ooops: - raiseAssert: "Unsupported database type: " & $acc.dbType + for (path,data) in acc.aTx.rightPairsStorage accPath: + yield (acc.ctx.parent.ariApi.call(pathAsBlob, path), data) acc.ifTrackNewApi: debug logTxt, api, elapsed diff --git a/nimbus/db/core_db/core_apps.nim b/nimbus/db/core_db/core_apps.nim index 869a31e922..2a7104aeb4 100644 --- a/nimbus/db/core_db/core_apps.nim +++ b/nimbus/db/core_db/core_apps.nim @@ -36,27 +36,27 @@ type # ------------------------------------------------------------------------------ proc getBlockHeader*( - db: CoreDbRef; + db: CoreDbTxRef; n: BlockNumber; ): Result[Header, string] proc getBlockHeader*( - db: CoreDbRef, + db: CoreDbTxRef, blockHash: Hash32; ): Result[Header, string] proc getBlockHash*( - db: CoreDbRef; + db: CoreDbTxRef; n: BlockNumber; ): Result[Hash32, string] proc addBlockNumberToHashLookup*( - db: CoreDbRef; + db: CoreDbTxRef; blockNumber: BlockNumber; blockHash: Hash32; ) -proc getCanonicalHeaderHash*(db: CoreDbRef): Result[Hash32, string] +proc getCanonicalHeaderHash*(db: CoreDbTxRef): Result[Hash32, string] # ------------------------------------------------------------------------------ # Private helpers @@ -73,17 +73,16 @@ template wrapRlpException(info: static[string]; code: untyped) = # ------------------------------------------------------------------------------ iterator getBlockTransactionData*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; ): seq[byte] = block body: if txRoot == EMPTY_ROOT_HASH: break body - let kvt = db.ctx.getKvt() for idx in 0'u16..blockHash - discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray) + discard db.del(blockNumberToHashKey(blockNum).toOpenArray) # delete blockHash->header, stateRoot->blockNum - discard kvt.del(genericHashKey(blockHash).toOpenArray) + discard db.del(genericHashKey(blockHash).toOpenArray) true proc getTransactionByIndex*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; txIndex: uint16; ): Result[Transaction, string] = const info = "getTransaction()" - let kvt = db.ctx.getKvt() let key = hashIndexKey(txRoot, txIndex) - let txData = kvt.getOrEmpty(key).valueOr: + let txData = db.getOrEmpty(key).valueOr: return err($$error) if txData.len == 0: return err("tx data is empty for root=" & $txRoot & " and index=" & $txIndex) @@ -323,17 +316,16 @@ proc getTransactionByIndex*( return ok(rlp.decode(txData, Transaction)) proc getTransactionCount*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32; ): int = const info = "getTransactionCount()" - let kvt = db.ctx.getKvt() var txCount = 0'u16 while true: let key = hashIndexKey(txRoot, txCount) - let yes = kvt.hasKeyRc(key).valueOr: + let yes = db.hasKeyRc(key).valueOr: warn info, txRoot, key, error=($$error) return 0 if yes: @@ -344,7 +336,7 @@ proc getTransactionCount*( doAssert(false, "unreachable") proc getUnclesCount*( - db: CoreDbRef; + db: CoreDbTxRef; ommersHash: Hash32; ): Result[int, string] = const info = "getUnclesCount()" @@ -354,14 +346,14 @@ proc getUnclesCount*( wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) - db.ctx.getKvt().get(key.toOpenArray).valueOr: + db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn info, ommersHash, error=($$error) return ok(0) return ok(rlpFromBytes(encodedUncles).listLen) proc getUncles*( - db: CoreDbRef; + db: CoreDbTxRef; ommersHash: Hash32; ): Result[seq[Header], string] = const info = "getUncles()" @@ -371,29 +363,28 @@ proc getUncles*( wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) - db.ctx.getKvt().get(key.toOpenArray).valueOr: + db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn info, ommersHash, error=($$error) return ok(default(seq[Header])) return ok(rlp.decode(encodedUncles, seq[Header])) proc persistWithdrawals*( - db: CoreDbRef; + db: CoreDbTxRef; withdrawalsRoot: Hash32; withdrawals: openArray[Withdrawal]; ) = const info = "persistWithdrawals()" if withdrawals.len == 0: return - let kvt = db.ctx.getKvt() for idx, wd in withdrawals: let key = hashIndexKey(withdrawalsRoot, idx.uint16) - kvt.put(key, rlp.encode(wd)).isOkOr: + db.put(key, rlp.encode(wd)).isOkOr: warn info, idx, error=($$error) return proc getWithdrawals*( - db: CoreDbRef; + db: CoreDbTxRef; withdrawalsRoot: Hash32 ): Result[seq[Withdrawal], string] = wrapRlpException "getWithdrawals": @@ -403,7 +394,7 @@ proc getWithdrawals*( return ok(res) proc getTransactions*( - db: CoreDbRef; + db: CoreDbTxRef; txRoot: Hash32 ): Result[seq[Transaction], string] = wrapRlpException "getTransactions": @@ -413,7 +404,7 @@ proc getTransactions*( return ok(res) proc getBlockBody*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; ): Result[BlockBody, string] = wrapRlpException "getBlockBody": @@ -427,14 +418,14 @@ proc getBlockBody*( return ok(body) proc getBlockBody*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; ): Result[BlockBody, string] = let header = ?db.getBlockHeader(blockHash) db.getBlockBody(header) proc getEthBlock*( - db: CoreDbRef; + db: CoreDbTxRef; hash: Hash32; ): Result[EthBlock, string] = var @@ -443,7 +434,7 @@ proc getEthBlock*( ok(EthBlock.init(move(header), move(blockBody))) proc getEthBlock*( - db: CoreDbRef; + db: CoreDbTxRef; blockNumber: BlockNumber; ): Result[EthBlock, string] = var @@ -454,7 +445,7 @@ proc getEthBlock*( proc getUncleHashes*( - db: CoreDbRef; + db: CoreDbTxRef; blockHashes: openArray[Hash32]; ): Result[seq[Hash32], string] = var res: seq[Hash32] @@ -464,7 +455,7 @@ proc getUncleHashes*( ok(res) proc getUncleHashes*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; ): Result[seq[Hash32], string] = if header.ommersHash != EMPTY_UNCLE_HASH: @@ -473,59 +464,58 @@ proc getUncleHashes*( wrapRlpException "getUncleHashes": let key = genericHashKey(header.ommersHash) - encodedUncles = db.ctx.getKvt().get(key.toOpenArray).valueOr: + encodedUncles = db.get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn "getUncleHashes()", ommersHash=header.ommersHash, error=($$error) return ok(default(seq[Hash32])) return ok(rlp.decode(encodedUncles, seq[Header]).mapIt(it.rlpHash)) proc getTransactionKey*( - db: CoreDbRef; + db: CoreDbTxRef; transactionHash: Hash32; ): Result[TransactionKey, string] = wrapRlpException "getTransactionKey": let txKey = transactionHashToBlockKey(transactionHash) - tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr: + tx = db.get(txKey.toOpenArray).valueOr: if error.error != KvtNotFound: warn "getTransactionKey()", transactionHash, error=($$error) return ok(default(TransactionKey)) return ok(rlp.decode(tx, TransactionKey)) -proc headerExists*(db: CoreDbRef; blockHash: Hash32): bool = +proc headerExists*(db: CoreDbTxRef; blockHash: Hash32): bool = ## Returns True if the header with the given block hash is in our DB. - db.ctx.getKvt().hasKeyRc(genericHashKey(blockHash).toOpenArray).valueOr: + db.hasKeyRc(genericHashKey(blockHash).toOpenArray).valueOr: if error.error != KvtNotFound: warn "headerExists()", blockHash, error=($$error) return false # => true/false proc setHead*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; ): Result[void, string] = let canonicalHeadHash = canonicalHeadHashKey() - db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr: + db.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr: return err($$error) ok() proc setHead*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; writeHeader = false; ): Result[void, string] = var headerHash = rlpHash(header) - let kvt = db.ctx.getKvt() if writeHeader: - kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr: + db.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr: return err($$error) let canonicalHeadHash = canonicalHeadHashKey() - kvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: return err($$error) ok() proc persistReceipts*( - db: CoreDbRef; + db: CoreDbTxRef; receiptsRoot: Hash32; receipts: openArray[Receipt]; ) = @@ -533,14 +523,13 @@ proc persistReceipts*( if receipts.len == 0: return - let kvt = db.ctx.getKvt() for idx, rec in receipts: let key = hashIndexKey(receiptsRoot, idx.uint16) - kvt.put(key, rlp.encode(rec)).isOkOr: + db.put(key, rlp.encode(rec)).isOkOr: warn info, idx, error=($$error) proc getReceipts*( - db: CoreDbRef; + db: CoreDbTxRef; receiptsRoot: Hash32; ): Result[seq[Receipt], string] = wrapRlpException "getReceipts": @@ -550,21 +539,20 @@ proc getReceipts*( return ok(receipts) proc persistScore*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; score: UInt256 ): Result[void, string] = const info = "persistScore" let - kvt = db.ctx.getKvt() scoreKey = blockHashToScoreKey(blockHash) - kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: + db.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: return err(info & ": " & $$error) ok() proc persistHeader*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; header: Header; startOfHistory = GENESIS_PARENT_HASH; @@ -572,13 +560,12 @@ proc persistHeader*( const info = "persistHeader" let - kvt = db.ctx.getKvt() isStartOfHistory = header.parentHash == startOfHistory if not isStartOfHistory and not db.headerExists(header.parentHash): return err(info & ": parent header missing number " & $header.number) - kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr: + db.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr: return err(info & ": " & $$error) let @@ -599,7 +586,7 @@ proc persistHeader*( ok() proc persistHeaderAndSetHead*( - db: CoreDbRef; + db: CoreDbTxRef; blockHash: Hash32; header: Header; startOfHistory = GENESIS_PARENT_HASH; @@ -621,7 +608,7 @@ proc persistHeaderAndSetHead*( db.setHead(blockHash) proc persistHeaderAndSetHead*( - db: CoreDbRef; + db: CoreDbTxRef; header: Header; startOfHistory = GENESIS_PARENT_HASH; ): Result[void, string] = @@ -629,43 +616,43 @@ proc persistHeaderAndSetHead*( blockHash = header.blockHash db.persistHeaderAndSetHead(blockHash, header, startOfHistory) -proc persistUncles*(db: CoreDbRef, uncles: openArray[Header]): Hash32 = +proc persistUncles*(db: CoreDbTxRef, uncles: openArray[Header]): Hash32 = ## Persists the list of uncles to the database. ## Returns the uncles hash. let enc = rlp.encode(uncles) result = keccak256(enc) - db.ctx.getKvt.put(genericHashKey(result).toOpenArray, enc).isOkOr: + db.put(genericHashKey(result).toOpenArray, enc).isOkOr: warn "persistUncles()", unclesHash=result, error=($$error) return EMPTY_ROOT_HASH -proc safeHeaderHash*(db: CoreDbRef): Hash32 = +proc safeHeaderHash*(db: CoreDbTxRef): Hash32 = db.getHash(safeHashKey()).valueOr(default(Hash32)) -proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash32) = +proc safeHeaderHash*(db: CoreDbTxRef, headerHash: Hash32) = let safeHashKey = safeHashKey() - db.ctx.getKvt.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(safeHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: warn "safeHeaderHash()", safeHashKey, error=($$error) return proc finalizedHeaderHash*( - db: CoreDbRef; + db: CoreDbTxRef; ): Hash32 = db.getHash(finalizedHashKey()).valueOr(default(Hash32)) -proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash32) = +proc finalizedHeaderHash*(db: CoreDbTxRef, headerHash: Hash32) = let finalizedHashKey = finalizedHashKey() - db.ctx.getKvt.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: + db.put(finalizedHashKey.toOpenArray, rlp.encode(headerHash)).isOkOr: warn "finalizedHeaderHash()", finalizedHashKey, error=($$error) return proc safeHeader*( - db: CoreDbRef; + db: CoreDbTxRef; ): Result[Header, string] = db.getBlockHeader(db.safeHeaderHash) proc finalizedHeader*( - db: CoreDbRef; + db: CoreDbTxRef; ): Result[Header, string] = db.getBlockHeader(db.finalizedHeaderHash) diff --git a/nimbus/db/kvt/kvt_api.nim b/nimbus/db/kvt/kvt_api.nim index 93dbe79ff9..41d0a6754a 100644 --- a/nimbus/db/kvt/kvt_api.nim +++ b/nimbus/db/kvt/kvt_api.nim @@ -41,26 +41,23 @@ type ## Borrowed from `aristo_profile` KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.} - KvtApiDelFn* = proc(db: KvtDbRef, + KvtApiDelFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[void,KvtError] {.noRaise.} KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.} KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.} - KvtApiGetFn* = proc(db: KvtDbRef, + KvtApiGetFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[seq[byte],KvtError] {.noRaise.} - KvtApiLenFn* = proc(db: KvtDbRef, + KvtApiLenFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[int,KvtError] {.noRaise.} - KvtApiHasKeyRcFn* = proc(db: KvtDbRef, + KvtApiHasKeyRcFn* = proc(db: KvtTxRef, key: openArray[byte]): Result[bool,KvtError] {.noRaise.} - KvtApiIsTopFn* = proc(tx: KvtTxRef): bool {.noRaise.} - KvtApiTxFrameLevelFn* = proc(db: KvtDbRef): int {.noRaise.} - KvtApiPutFn* = proc(db: KvtDbRef, + KvtApiPutFn* = proc(db: KvtTxRef, key, data: openArray[byte]): Result[void,KvtError] {.noRaise.} KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.} KvtApiPersistFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.} KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.} - KvtApiTxFrameBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.} - KvtApiTxFrameTopFn* = - proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.} + KvtApiTxFrameBeginFn* = proc(db: KvtDbRef, parent: KvtTxRef): Result[KvtTxRef,KvtError] {.noRaise.} + KvtApiBaseTxFrameFn* = proc(db: KvtDbRef): KvtTxRef {.noRaise.} KvtApiRef* = ref KvtApiObj KvtApiObj* = object of RootObj @@ -72,14 +69,12 @@ type get*: KvtApiGetFn len*: KvtApiLenFn hasKeyRc*: KvtApiHasKeyRcFn - isTop*: KvtApiIsTopFn - txFrameLevel*: KvtApiTxFrameLevelFn put*: KvtApiPutFn rollback*: KvtApiRollbackFn persist*: KvtApiPersistFn toKvtDbRef*: KvtApiToKvtDbRefFn txFrameBegin*: KvtApiTxFrameBeginFn - txFrameTop*: KvtApiTxFrameTopFn + baseTxFrame*: KvtApiBaseTxFrameFn KvtApiProfNames* = enum @@ -92,14 +87,12 @@ type KvtApiProfGetFn = "get" KvtApiProfLenFn = "len" KvtApiProfHasKeyRcFn = "hasKeyRc" - KvtApiProfIsTopFn = "isTop" - KvtApiProfLevelFn = "level" KvtApiProfPutFn = "put" KvtApiProfRollbackFn = "rollback" KvtApiProfPersistFn = "persist" KvtApiProfToKvtDbRefFn = "toKvtDbRef" KvtApiProfTxFrameBeginFn = "txFrameBegin" - KvtApiProfTxFrameTopFn = "txFrameTop" + KvtApiProfBaseTxFrameFn = "baseTxFrame" KvtApiProfBeGetKvpFn = "be/getKvp" KvtApiProfBeLenKvpFn = "be/lenKvp" @@ -149,14 +142,13 @@ func init*(api: var KvtApiObj) = api.get = get api.len = len api.hasKeyRc = hasKeyRc - api.isTop = isTop - api.txFrameLevel = txFrameLevel api.put = put api.rollback = rollback api.persist = persist api.toKvtDbRef = toKvtDbRef api.txFrameBegin = txFrameBegin - api.txFrameTop = txFrameTop + api.baseTxFrame = baseTxFrame + when AutoValidateApiHooks: api.validate @@ -226,16 +218,6 @@ func init*( KvtApiProfHasKeyRcFn.profileRunner: result = api.hasKeyRc(a, b) - profApi.isTop = - proc(a: KvtTxRef): auto = - KvtApiProfIsTopFn.profileRunner: - result = api.isTop(a) - - profApi.level = - proc(a: KvtDbRef): auto = - KvtApiProfLevelFn.profileRunner: - result = api.level(a) - profApi.put = proc(a: KvtDbRef; b, c: openArray[byte]): auto = KvtApiProfPutFn.profileRunner: @@ -261,11 +243,6 @@ func init*( KvtApiProfTxFrameBeginFn.profileRunner: result = api.txFrameBegin(a) - profApi.txFrameTop = - proc(a: KvtDbRef): auto = - KvtApiProfTxFrameTopFn.profileRunner: - result = api.txFrameTop(a) - let beDup = be.dup() if beDup.isNil: profApi.be = be diff --git a/nimbus/db/kvt/kvt_delta.nim b/nimbus/db/kvt/kvt_delta.nim index a5e084af3b..318a251e27 100644 --- a/nimbus/db/kvt/kvt_delta.nim +++ b/nimbus/db/kvt/kvt_delta.nim @@ -23,7 +23,7 @@ import # ------------------------------------------------------------------------------ proc deltaPersistentOk*(db: KvtDbRef): bool = - ## Check whether the balancer filter can be merged into the backend + ## Check whether txRef can be merged into the backend not db.backend.isNil @@ -45,17 +45,17 @@ proc deltaPersistent*( return err(FilBackendMissing) # Blind or missing filter - if db.balancer.isNil: + if db.txRef.isNil: return ok() # Store structural single trie entries let writeBatch = ? be.putBegFn() - for k,v in db.balancer.sTab: + for k,v in db.txRef.layer.sTab: be.putKvpFn(writeBatch, k, v) ? be.putEndFn writeBatch - # Done with balancer, all saved to backend - db.balancer = LayerRef(nil) + # Done with txRef, all saved to backend + db.txRef.layer.sTab.clear() ok() diff --git a/nimbus/db/kvt/kvt_delta/delta_merge.nim b/nimbus/db/kvt/kvt_delta/delta_merge.nim index 4cde2d39da..da16a3a2e3 100644 --- a/nimbus/db/kvt/kvt_delta/delta_merge.nim +++ b/nimbus/db/kvt/kvt_delta/delta_merge.nim @@ -9,42 +9,13 @@ # except according to those terms. import - ../kvt_desc + ../kvt_desc, + ../../../utils/mergeutils # ------------------------------------------------------------------------------ # Private functions # ------------------------------------------------------------------------------ -proc layersMergeOnto(src: LayerRef; trg: var LayerObj) = - for (key,val) in src.sTab.pairs: - trg.sTab[key] = val - -# ------------------------------------------------------------------------------ -# Public functions -# ------------------------------------------------------------------------------ - -proc deltaMerge*( - upper: LayerRef; # Think of `top`, `nil` is ok - lower: LayerRef; # Think of `balancer`, `nil` is ok - ): LayerRef = - ## Merge argument `upper` into the `lower` filter instance. - ## - ## Note that the namimg `upper` and `lower` indicate that the filters are - ## stacked and the database access is `upper -> lower -> backend`. - ## - if lower.isNil: - # Degenerate case: `upper` is void - upper - - elif upper.isNil: - # Degenerate case: `lower` is void - lower - - else: - # Can modify `lower` which is the prefered action mode but applies only - # in cases where the `lower` argument is not shared. - layersMergeOnto(upper, lower[]) - lower # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/kvt/kvt_desc.nim b/nimbus/db/kvt/kvt_desc.nim index 0f007e20e8..b7ad3efedf 100644 --- a/nimbus/db/kvt/kvt_desc.nim +++ b/nimbus/db/kvt/kvt_desc.nim @@ -30,18 +30,15 @@ type ## Transaction descriptor db*: KvtDbRef ## Database descriptor parent*: KvtTxRef ## Previous transaction - txUid*: uint ## Unique ID among transactions - level*: int ## Stack index for this transaction + layer*: LayerRef KvtDbRef* = ref object of RootRef ## Three tier database object supporting distributed instances. - top*: LayerRef ## Database working layer, mutable - stack*: seq[LayerRef] ## Stashed immutable parent layers - balancer*: LayerRef ## Balance out concurrent backend access backend*: BackendRef ## Backend database (may well be `nil`) - txRef*: KvtTxRef ## Latest active transaction - txUidGen*: uint ## Tx-relative unique number generator + txRef*: KvtTxRef + ## Tx holding data scheduled to be written to disk during the next + ## `persist` call # Debugging data below, might go away in future xIdGen*: uint64 @@ -68,19 +65,15 @@ func isValid*(layer: LayerRef): bool = # Public functions, miscellaneous # ------------------------------------------------------------------------------ -# Hash set helper -func hash*(db: KvtDbRef): Hash = - ## Table/KeyedQueue/HashSet mixin - cast[pointer](db).hash +# Don't put in a hash! +func hash*(db: KvtDbRef): Hash {.error.} -# ------------------------------------------------------------------------------ -# Public functions, `dude` related -# ------------------------------------------------------------------------------ - -iterator rstack*(db: KvtDbRef): LayerRef = +iterator rstack*(tx: KvtTxRef): LayerRef = + var tx = tx # Stack in reverse order - for i in 0.. 0: - ac.ledger.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: + ac.txFrame.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: raiseAssert info & $$error # move the overlayStorage to originalStorage, related to EIP2200, EIP1283 acc.originalStorage[slot] = value else: - ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr: + ac.txFrame.slotDelete(acc.toAccountKey, slotKey).isOkOr: if error.error != StoNotFound: raiseAssert info & $$error discard @@ -332,7 +331,7 @@ proc persistStorage(acc: AccountRef, ac: LedgerRef) = # over.. let key = slotKey.data.slotHashToSlotKey - rc = ac.kvt.put(key.toOpenArray, blobify(slot).data) + rc = ac.txFrame.put(key.toOpenArray, blobify(slot).data) if rc.isErr: warn logTxt "persistStorage()", slot, error=($$rc.error) @@ -358,17 +357,16 @@ proc makeDirty(ac: LedgerRef, address: Address, cloneStorage = true): AccountRef # ------------------------------------------------------------------------------ # The LedgerRef is modeled after TrieDatabase for it's transaction style -proc init*(x: typedesc[LedgerRef], db: CoreDbRef, storeSlotHash: bool): LedgerRef = +proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): LedgerRef = new result - result.ledger = db.ctx.getAccounts() - result.kvt = db.ctx.getKvt() + result.txFrame = db result.witnessCache = Table[Address, WitnessData]() result.storeSlotHash = storeSlotHash result.code = typeof(result.code).init(codeLruSize) result.slots = typeof(result.slots).init(slotsLruSize) discard result.beginSavepoint -proc init*(x: typedesc[LedgerRef], db: CoreDbRef): LedgerRef = +proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) proc getStateRoot*(ac: LedgerRef): Hash32 = @@ -376,7 +374,7 @@ proc getStateRoot*(ac: LedgerRef): Hash32 = doAssert(ac.savePoint.parentSavepoint.isNil) # make sure all cache already committed doAssert(ac.isDirty == false) - ac.ledger.getStateRoot().expect("working database") + ac.txFrame.getStateRoot().expect("working database") proc isTopLevelClean*(ac: LedgerRef): bool = ## Getter, returns `true` if all pending data have been commited. @@ -464,7 +462,7 @@ proc getCode*(ac: LedgerRef, acc.code = if acc.statement.codeHash != EMPTY_CODE_HASH: ac.code.get(acc.statement.codeHash).valueOr: - var rc = ac.kvt.get(contractHashKey(acc.statement.codeHash).toOpenArray) + var rc = ac.txFrame.get(contractHashKey(acc.statement.codeHash).toOpenArray) if rc.isErr: warn logTxt "getCode()", codeHash=acc.statement.codeHash, error=($$rc.error) CodeBytesRef() @@ -494,7 +492,7 @@ proc getCodeSize*(ac: LedgerRef, address: Address): int = # cached and easily accessible in the database layer - this is to prevent # EXTCODESIZE calls from messing up the code cache and thus causing # recomputation of the jump destination table - var rc = ac.kvt.len(contractHashKey(acc.statement.codeHash).toOpenArray) + var rc = ac.txFrame.len(contractHashKey(acc.statement.codeHash).toOpenArray) return rc.valueOr: warn logTxt "getCodeSize()", codeHash=acc.statement.codeHash, error=($$rc.error) @@ -544,7 +542,7 @@ proc contractCollision*(ac: LedgerRef, address: Address): bool = return acc.statement.nonce != 0 or acc.statement.codeHash != EMPTY_CODE_HASH or - not ac.ledger.slotStorageEmptyOrVoid(acc.toAccountKey) + not ac.txFrame.slotStorageEmptyOrVoid(acc.toAccountKey) proc accountExists*(ac: LedgerRef, address: Address): bool = let acc = ac.getAccount(address, false) @@ -630,11 +628,11 @@ proc clearStorage*(ac: LedgerRef, address: Address) = let acc = ac.getAccount(address) acc.flags.incl {Alive, NewlyCreated} - let empty = ac.ledger.slotStorageEmpty(acc.toAccountKey).valueOr: return + let empty = ac.txFrame.slotStorageEmpty(acc.toAccountKey).valueOr: return if not empty: # need to clear the storage from the database first let acc = ac.makeDirty(address, cloneStorage = false) - ac.ledger.clearStorage(acc.toAccountKey).isOkOr: + ac.txFrame.clearStorage(acc.toAccountKey).isOkOr: raiseAssert info & $$error # update caches if acc.originalStorage.isNil.not: @@ -722,10 +720,10 @@ proc persist*(ac: LedgerRef, else: # This one is only necessary unless `persistStorage()` is run which needs # to `merge()` the latest statement as well. - ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: + ac.txFrame.merge(acc.toAccountKey, acc.statement).isOkOr: raiseAssert info & $$error of Remove: - ac.ledger.delete(acc.toAccountKey).isOkOr: + ac.txFrame.delete(acc.toAccountKey).isOkOr: if error.error != AccNotFound: raiseAssert info & $$error ac.savePoint.cache.del eAddr @@ -762,14 +760,14 @@ iterator accounts*(ac: LedgerRef): Account = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) for _, acc in ac.savePoint.cache: - yield ac.ledger.recast( + yield ac.txFrame.recast( acc.toAccountKey, acc.statement).value iterator pairs*(ac: LedgerRef): (Address, Account) = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) for address, acc in ac.savePoint.cache: - yield (address, ac.ledger.recast( + yield (address, ac.txFrame.recast( acc.toAccountKey, acc.statement).value) iterator storage*( @@ -778,8 +776,8 @@ iterator storage*( ): (UInt256, UInt256) = # beware that if the account not persisted, # the storage root will not be updated - for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey: - let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray) + for (slotHash, value) in ac.txFrame.slotPairs eAddr.toAccountKey: + let rc = ac.txFrame.get(slotHashToSlotKey(slotHash).toOpenArray) if rc.isErr: warn logTxt "storage()", slotHash, error=($$rc.error) continue @@ -801,7 +799,7 @@ proc getStorageRoot*(ac: LedgerRef, address: Address): Hash32 = # the storage root will not be updated let acc = ac.getAccount(address, false) if acc.isNil: EMPTY_ROOT_HASH - else: ac.ledger.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH + else: ac.txFrame.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH proc update(wd: var WitnessData, acc: AccountRef) = # once the code is touched make sure it doesn't get reset back to false in another update @@ -895,13 +893,13 @@ proc getEthAccount*(ac: LedgerRef, address: Address): Account = return emptyEthAccount ## Convert to legacy object, will throw an assert if that fails - let rc = ac.ledger.recast(acc.toAccountKey, acc.statement) + let rc = ac.txFrame.recast(acc.toAccountKey, acc.statement) if rc.isErr: raiseAssert "getAccount(): cannot convert account: " & $$rc.error rc.value proc getAccountProof*(ac: LedgerRef, address: Address): seq[seq[byte]] = - let accProof = ac.ledger.proof(address.toAccountKey).valueOr: + let accProof = ac.txFrame.proof(address.toAccountKey).valueOr: raiseAssert "Failed to get account proof: " & $$error accProof[0] @@ -911,7 +909,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] let addressHash = address.toAccountKey - accountExists = ac.ledger.hasPath(addressHash).valueOr: + accountExists = ac.txFrame.hasPath(addressHash).valueOr: raiseAssert "Call to hasPath failed: " & $$error for slot in slots: @@ -922,7 +920,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] let slotKey = ac.slots.get(slot).valueOr: slot.toBytesBE.keccak256 - slotProof = ac.ledger.slotProof(addressHash, slotKey).valueOr: + slotProof = ac.txFrame.slotProof(addressHash, slotKey).valueOr: if error.aErr == FetchPathNotFound: storageProof.add(@[]) continue diff --git a/nimbus/evm/interpreter/op_handlers/oph_helpers.nim b/nimbus/evm/interpreter/op_handlers/oph_helpers.nim index 2a98b9a5b3..04f0cfd4e7 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_helpers.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_helpers.nim @@ -19,7 +19,7 @@ import ../../evm_errors, ../../types, ../gas_costs, - eth/common/[addresses, base, hashes] + eth/common/[addresses, base] when defined(evmc_enabled): import diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim index 9548100ecb..60e9413de3 100644 --- a/nimbus/evm/state.nim +++ b/nimbus/evm/state.nim @@ -79,6 +79,7 @@ proc new*( parent: Header; ## parent header, account sync position blockCtx: BlockContext; com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): T = ## Create a new `BaseVMState` descriptor from a parent block header. This @@ -90,7 +91,7 @@ proc new*( ## with the `parent` block header. new result result.init( - ac = LedgerRef.init(com.db, storeSlotHash), + ac = LedgerRef.init(txFrame, storeSlotHash), parent = parent, blockCtx = blockCtx, com = com, @@ -99,7 +100,6 @@ proc new*( proc reinit*(self: BaseVMState; ## Object descriptor parent: Header; ## parent header, account sync pos. blockCtx: BlockContext; - linear: bool ): bool = ## Re-initialise state descriptor. The `LedgerRef` database is ## re-initilaise only if its `getStateRoot()` doe not point to `parent.stateRoot`, @@ -116,9 +116,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor let tracer = self.tracer com = self.com - db = com.db - ac = if linear or self.stateDB.getStateRoot() == parent.stateRoot: self.stateDB - else: LedgerRef.init(db, self.stateDB.storeSlotHash) + ac = self.stateDB flags = self.flags self.init( ac = ac, @@ -132,7 +130,6 @@ proc reinit*(self: BaseVMState; ## Object descriptor proc reinit*(self: BaseVMState; ## Object descriptor parent: Header; ## parent header, account sync pos. header: Header; ## header with tx environment data fields - linear: bool ): bool = ## Variant of `reinit()`. The `parent` argument is used to sync the accounts ## cache and the `header` is used as a container to pass the `timestamp`, @@ -143,7 +140,6 @@ proc reinit*(self: BaseVMState; ## Object descriptor self.reinit( parent = parent, blockCtx = blockCtx(header), - linear = linear ) proc init*( @@ -151,6 +147,7 @@ proc init*( parent: Header; ## parent header, account sync position header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false) = ## Variant of `new()` constructor above for in-place initalisation. The @@ -161,7 +158,7 @@ proc init*( ## It requires the `header` argument properly initalised so that for PoA ## networks, the miner address is retrievable via `ecRecover()`. self.init( - ac = LedgerRef.init(com.db, storeSlotHash), + ac = LedgerRef.init(txFrame, storeSlotHash), parent = parent, blockCtx = blockCtx(header), com = com, @@ -172,6 +169,7 @@ proc new*( parent: Header; ## parent header, account sync position header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): T = ## This is a variant of the `new()` constructor above where the `parent` @@ -185,6 +183,7 @@ proc new*( parent = parent, header = header, com = com, + txFrame = txFrame, tracer = tracer, storeSlotHash = storeSlotHash) @@ -192,18 +191,20 @@ proc new*( T: type BaseVMState; header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): EvmResult[T] = ## This is a variant of the `new()` constructor above where the field ## `header.parentHash`, is used to fetch the `parent` Header to be ## used in the `new()` variant, above. - let parent = com.db.getBlockHeader(header.parentHash).valueOr: + let parent = txFrame.getBlockHeader(header.parentHash).valueOr: return err(evmErr(EvmHeaderNotFound)) ok(BaseVMState.new( parent = parent, header = header, com = com, + txFrame = txFrame, tracer = tracer, storeSlotHash = storeSlotHash)) @@ -211,16 +212,18 @@ proc init*( vmState: BaseVMState; header: Header; ## header with tx environment data fields com: CommonRef; ## block chain config + txFrame: CoreDbTxRef; tracer: TracerRef = nil, storeSlotHash = false): bool = ## Variant of `new()` which does not throw an exception on a dangling ## `Header` parent hash reference. - let parent = com.db.getBlockHeader(header.parentHash).valueOr: + let parent = txFrame.getBlockHeader(header.parentHash).valueOr: return false vmState.init( parent = parent, header = header, com = com, + txFrame = txFrame, tracer = tracer, storeSlotHash = storeSlotHash) return true @@ -238,7 +241,7 @@ proc proofOfStake*(vmState: BaseVMState): bool = number: vmState.blockNumber, parentHash: vmState.blockCtx.parentHash, difficulty: vmState.blockCtx.difficulty, - )) + ), vmState.stateDB.txFrame) proc difficultyOrPrevRandao*(vmState: BaseVMState): UInt256 = if vmState.proofOfStake(): @@ -252,7 +255,7 @@ func baseFeePerGas*(vmState: BaseVMState): UInt256 = method getAncestorHash*( vmState: BaseVMState, blockNumber: BlockNumber): Hash32 {.gcsafe, base.} = - let db = vmState.com.db + let db = vmState.stateDB.txFrame let blockHash = db.getBlockHash(blockNumber).valueOr: return default(Hash32) blockHash diff --git a/nimbus/graphql/ethapi.nim b/nimbus/graphql/ethapi.nim index 8aa1c0486c..46bf2be130 100644 --- a/nimbus/graphql/ethapi.nim +++ b/nimbus/graphql/ethapi.nim @@ -152,7 +152,7 @@ proc getStateDB(com: CommonRef, header: Header): LedgerRef {.deprecated: "Ledge ## Retrieves the account db from canonical head ## we don't use accounst_cache here because it's read only operations # TODO the ledger initialized here refers to the base, not the given header! - LedgerRef.init(com.db) + LedgerRef.init(com.db.ctx.txFrameBegin(nil)) # TODO use frame from forkedchain! proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult = try: @@ -181,7 +181,8 @@ proc getLatestBlock(ctx: GraphqlContextRef): RespResult = ok(headerNode(ctx, header)) proc getTxCount(ctx: GraphqlContextRef, txRoot: Hash32): RespResult = - let txCount = ctx.chainDB.getTransactionCount(txRoot) + # TODO forkedchain! + let txCount = ctx.chainDB.baseTxFrame().getTransactionCount(txRoot) ok(resp(txCount)) proc longNode(val: uint64 | int64): RespResult = @@ -234,17 +235,17 @@ proc resp(data: openArray[byte]): RespResult = ok(resp("0x" & data.toHex)) proc getTotalDifficulty(ctx: GraphqlContextRef, blockHash: Hash32): RespResult = - let score = getScore(ctx.chainDB, blockHash).valueOr: + let score = getScore(ctx.chainDB.baseTxFrame(), blockHash).valueOr: return err("can't get total difficulty") bigIntNode(score) proc getOmmerCount(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - let ommers = ?ctx.chainDB.getUnclesCount(ommersHash) + let ommers = ?ctx.chainDB.baseTxFrame().getUnclesCount(ommersHash) ok(resp(ommers)) proc getOmmers(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - let uncles = ?ctx.chainDB.getUncles(ommersHash) + let uncles = ?ctx.chainDB.baseTxFrame().getUncles(ommersHash) when false: # EIP 1767 says no ommers == null # but hive test case want empty array [] @@ -256,7 +257,7 @@ proc getOmmers(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = ok(list) proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespResult = - let uncles = ?ctx.chainDB.getUncles(ommersHash) + let uncles = ?ctx.chainDB.baseTxFrame().getUncles(ommersHash) if uncles.len == 0: return ok(respNull()) if index < 0 or index >= uncles.len: @@ -264,20 +265,20 @@ proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespRes ok(headerNode(ctx, uncles[index])) proc getTxs(ctx: GraphqlContextRef, header: Header): RespResult = - let txCount = getTransactionCount(ctx.chainDB, header.txRoot) + let txCount = getTransactionCount(ctx.chainDB.baseTxFrame(), header.txRoot) if txCount == 0: return ok(respNull()) var list = respList() var index = 0'u64 - let txList = ?ctx.chainDB.getTransactions(header.txRoot) + let txList = ?ctx.chainDB.baseTxFrame().getTransactions(header.txRoot) for tx in txList: list.add txNode(ctx, tx, index, header.number, header.baseFeePerGas) inc index index = 0'u64 var prevUsed = 0.GasInt - let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + let receiptList = ?ctx.chainDB.baseTxFrame().getReceipts(header.receiptsRoot) for r in receiptList: let tx = TxNode(list.sons[index]) tx.receipt = r @@ -291,20 +292,20 @@ proc getWithdrawals(ctx: GraphqlContextRef, header: Header): RespResult = if header.withdrawalsRoot.isNone: return ok(respNull()) - let wds = ?ctx.chainDB.getWithdrawals(header.withdrawalsRoot.get) + let wds = ?ctx.chainDB.baseTxFrame().getWithdrawals(header.withdrawalsRoot.get) var list = respList() for wd in wds: list.add wdNode(ctx, wd) ok(list) proc getTxAt(ctx: GraphqlContextRef, header: Header, index: uint64): RespResult = - let tx = ctx.chainDB.getTransactionByIndex(header.txRoot, index.uint16).valueOr: + let tx = ctx.chainDB.baseTxFrame().getTransactionByIndex(header.txRoot, index.uint16).valueOr: return ok(respNull()) let txn = txNode(ctx, tx, index, header.number, header.baseFeePerGas) var i = 0'u64 var prevUsed = 0.GasInt - let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + let receiptList = ?ctx.chainDB.baseTxFrame().getReceipts(header.receiptsRoot) for r in receiptList: if i == index: let tx = TxNode(txn) @@ -316,8 +317,8 @@ proc getTxAt(ctx: GraphqlContextRef, header: Header, index: uint64): RespResult proc getTxByHash(ctx: GraphqlContextRef, hash: Hash32): RespResult = let - txKey = ?ctx.chainDB.getTransactionKey(hash) - header = ?ctx.chainDB.getBlockHeader(txKey.blockNumber) + txKey = ?ctx.chainDB.baseTxFrame().getTransactionKey(hash) + header = ?ctx.chainDB.baseTxFrame().getBlockHeader(txKey.blockNumber) getTxAt(ctx, header, txKey.index) proc accountNode(ctx: GraphqlContextRef, header: Header, address: Address): RespResult = diff --git a/nimbus/nimbus_execution_client.nim b/nimbus/nimbus_execution_client.nim index 959125b881..4c62c4ae43 100644 --- a/nimbus/nimbus_execution_client.nim +++ b/nimbus/nimbus_execution_client.nim @@ -159,7 +159,7 @@ proc setupMetrics(nimbus: NimbusNode, conf: NimbusConf) = proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef; conf: NimbusConf) = let - kvt = db.ctx.getKvt() + kvt = db.baseTxFrame() calculatedId = calcHash(conf.networkId, conf.networkParams) dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr: # an empty database diff --git a/nimbus/nimbus_import.nim b/nimbus/nimbus_import.nim index 4de9fa377d..f5163ec8b7 100644 --- a/nimbus/nimbus_import.nim +++ b/nimbus/nimbus_import.nim @@ -97,7 +97,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) = setControlCHook(controlCHandler) let - start = com.db.getSavedStateBlockNumber() + 1 + start = com.db.baseTxFrame().getSavedStateBlockNumber() + 1 chain = com.newChain() (cfg, genesis_validators_root, lastEra1Block, firstSlotAfterMerge) = getMetadata(conf.networkId) diff --git a/nimbus/rpc/rpc_utils.nim b/nimbus/rpc/rpc_utils.nim index b41dbc246d..499e334dc3 100644 --- a/nimbus/rpc/rpc_utils.nim +++ b/nimbus/rpc/rpc_utils.nim @@ -256,7 +256,8 @@ proc createAccessList*(header: Header, args.gas = Opt.some(Quantity DEFAULT_RPC_GAS_CAP) let - vmState = BaseVMState.new(header, com).valueOr: + txFrame = com.db.ctx.txFrameBegin(nil) # TODO wrong txFrame + vmState = BaseVMState.new(header, com, txFrame).valueOr: handleError("failed to create vmstate: " & $error.code) fork = com.toEVMFork(forkDeterminationInfo(header.number, header.timestamp)) sender = args.sender @@ -284,7 +285,8 @@ proc createAccessList*(header: Header, # Apply the transaction with the access list tracer let tracer = AccessListTracer.new(accessList, sender, to, precompiles) - vmState = BaseVMState.new(header, com, tracer).valueOr: + txFrame = com.db.ctx.txFrameBegin(nil) # TODO fix txFrame + vmState = BaseVMState.new(header, com, txFrame, tracer).valueOr: handleError("failed to create vmstate: " & $error.code) res = rpcCallEvm(args, header, com, vmState).valueOr: handleError("failed to call evm: " & $error.code) diff --git a/nimbus/rpc/server_api.nim b/nimbus/rpc/server_api.nim index e761296bba..ed3cb930c5 100644 --- a/nimbus/rpc/server_api.nim +++ b/nimbus/rpc/server_api.nim @@ -41,8 +41,9 @@ func newServerAPI*(c: ForkedChainRef, t: TxPoolRef): ServerAPIRef = ServerAPIRef(com: c.com, chain: c, txPool: t) proc getTotalDifficulty*(api: ServerAPIRef, blockHash: Hash32): UInt256 = - let totalDifficulty = api.com.db.getScore(blockHash).valueOr: - return api.com.db.headTotalDifficulty() + # TODO forkedchain! + let totalDifficulty = api.com.db.baseTxFrame().getScore(blockHash).valueOr: + return api.com.db.baseTxFrame().headTotalDifficulty() return totalDifficulty proc getProof*( @@ -98,11 +99,13 @@ proc headerFromTag(api: ServerAPIRef, blockTag: Opt[BlockTag]): Result[Header, s api.headerFromTag(blockId) proc ledgerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[LedgerRef, string] = - let header = ?api.headerFromTag(blockTag) - if not api.chain.stateReady(header): - api.chain.replaySegment(header.blockHash) + # TODO avoid loading full header if hash is given + let + header = ?api.headerFromTag(blockTag) + txFrame = api.chain.txFrame(header) - ok(LedgerRef.init(api.com.db)) + # TODO maybe use a new frame derived from txFrame, to protect against abuse? + ok(LedgerRef.init(txFrame)) proc blockFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[Block, string] = if blockTag.kind == bidAlias: @@ -220,9 +223,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = let blk = api.chain.memoryBlock(header.blockHash) (blk.receipts, blk.blk.transactions) else: - let rcs = chain.db.getReceipts(header.receiptsRoot).valueOr: + let rcs = chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return Opt.some(newSeq[FilterLog](0)) - let txs = chain.db.getTransactions(header.txRoot).valueOr: + let txs = chain.baseTxFrame.getTransactions(header.txRoot).valueOr: return Opt.some(newSeq[FilterLog](0)) (rcs, txs) # Note: this will hit assertion error if number of block transactions @@ -337,17 +340,17 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = if blockhash == zeroHash32: # Receipt in database - let txDetails = api.chain.db.getTransactionKey(data).valueOr: + let txDetails = api.chain.baseTxFrame.getTransactionKey(data).valueOr: raise newException(ValueError, "TransactionKey not found") if txDetails.index < 0: return nil let header = api.chain.headerByNumber(txDetails.blockNumber).valueOr: raise newException(ValueError, "Block not found") - let tx = api.chain.db.getTransactionByIndex( + let tx = api.chain.baseTxFrame.getTransactionByIndex( header.txRoot, uint16(txDetails.index)).valueOr: return nil - let receipts = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + let receipts = api.chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return nil for receipt in receipts: let gasUsed = receipt.cumulativeGasUsed - prevGasUsed @@ -533,7 +536,7 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = if res.isOk: return populateTransactionObject(res.get().tx, Opt.none(Hash32), Opt.none(uint64)) - let txDetails = api.chain.db.getTransactionKey(txHash).valueOr: + let txDetails = api.chain.baseTxFrame.getTransactionKey(txHash).valueOr: return nil if txDetails.index < 0: let @@ -543,9 +546,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = return populateTransactionObject(tx, Opt.some(blockHash), Opt.some(txid)) # TODO: include block number - let header = api.chain.db.getBlockHeader(txDetails.blockNumber).valueOr: + let header = api.chain.baseTxFrame.getBlockHeader(txDetails.blockNumber).valueOr: return nil - let tx = api.chain.db.getTransactionByIndex(header.txRoot, uint16(txDetails.index)).valueOr: + let tx = api.chain.baseTxFrame.getTransactionByIndex(header.txRoot, uint16(txDetails.index)).valueOr: return nil return populateTransactionObject( tx, @@ -630,11 +633,11 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = receipts = blkdesc.receipts txs = blkdesc.blk.transactions else: - let receiptList = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + let receiptList = api.chain.baseTxFrame.getReceipts(header.receiptsRoot).valueOr: return Opt.none(seq[ReceiptObject]) for receipt in receiptList: receipts.add receipt - txs = api.chain.db.getTransactions(header.txRoot).valueOr: + txs = api.chain.baseTxFrame.getTransactions(header.txRoot).valueOr: return Opt.none(seq[ReceiptObject]) try: diff --git a/nimbus/sync/handlers/eth.nim b/nimbus/sync/handlers/eth.nim index 476d63792c..d0f42b659a 100644 --- a/nimbus/sync/handlers/eth.nim +++ b/nimbus/sync/handlers/eth.nim @@ -61,7 +61,8 @@ proc successorHeader(db: CoreDbRef, skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number <= (not 0.BlockNumber) - offset: - let header = db.getBlockHeader(h.number + offset).valueOr: + # TODO why is this using base db? + let header = db.baseTxFrame().getBlockHeader(h.number + offset).valueOr: return Opt.none(Header) return Opt.some(header) Opt.none(Header) @@ -71,7 +72,8 @@ proc ancestorHeader(db: CoreDbRef, skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number >= offset: - let header = db.getBlockHeader(h.number - offset).valueOr: + # TODO why is this using base db? + let header = db.baseTxFrame().getBlockHeader(h.number - offset).valueOr: return Opt.none(Header) return Opt.some(header) Opt.none(Header) @@ -79,10 +81,10 @@ proc ancestorHeader(db: CoreDbRef, proc blockHeader(db: CoreDbRef, b: BlockHashOrNumber): Opt[Header] = let header = if b.isHash: - db.getBlockHeader(b.hash).valueOr: + db.baseTxFrame().getBlockHeader(b.hash).valueOr: return Opt.none(Header) else: - db.getBlockHeader(b.number).valueOr: + db.baseTxFrame().getBlockHeader(b.number).valueOr: return Opt.none(Header) Opt.some(header) @@ -305,7 +307,8 @@ method getStatus*(ctx: EthWireRef): Result[EthState, string] forkId = com.forkId(bestBlock.number, bestBlock.timestamp) return ok(EthState( - totalDifficulty: db.headTotalDifficulty, + # TODO forkedChain + totalDifficulty: db.baseTxFrame().headTotalDifficulty, genesisHash: com.genesisHash, bestBlockHash: bestBlock.blockHash, forkId: ChainForkId( @@ -321,11 +324,12 @@ method getReceipts*(ctx: EthWireRef, let db = ctx.db var list: seq[seq[Receipt]] for blockHash in hashes: - let header = db.getBlockHeader(blockHash).valueOr: + # TODO forkedChain + let header = db.baseTxFrame().getBlockHeader(blockHash).valueOr: list.add @[] trace "handlers.getReceipts: blockHeader not found", blockHash continue - let receiptList = ?db.getReceipts(header.receiptsRoot) + let receiptList = ?db.baseTxFrame().getReceipts(header.receiptsRoot) list.add receiptList return ok(list) @@ -356,7 +360,8 @@ method getBlockBodies*(ctx: EthWireRef, let db = ctx.db var list: seq[BlockBody] for blockHash in hashes: - let body = db.getBlockBody(blockHash).valueOr: + # TODO forkedChain + let body = db.baseTxFrame().getBlockBody(blockHash).valueOr: list.add BlockBody() trace "handlers.getBlockBodies: blockBody not found", blockHash continue diff --git a/nimbus/tracer.nim b/nimbus/tracer.nim index 0a241178c5..af4b8127f5 100644 --- a/nimbus/tracer.nim +++ b/nimbus/tracer.nim @@ -72,7 +72,7 @@ proc init( com: CommonRef; topHeader: Header; ): T = - let header = com.db.getBlockHeader(topHeader.parentHash).expect("top header parent exists") + let header = com.db.baseTxFrame().getBlockHeader(topHeader.parentHash).expect("top header parent exists") T.init(com, header.stateRoot) proc activate(cc: CaptCtxRef): CaptCtxRef {.discardable.} = @@ -111,7 +111,7 @@ proc toJson(receipt: Receipt): JsonNode = result["status"] = %receipt.status proc dumpReceiptsImpl( - chainDB: CoreDbRef; + chainDB: CoreDbTxRef; header: Header; ): JsonNode = result = newJArray() @@ -168,7 +168,7 @@ proc traceTransactionImpl( let tracerInst = newLegacyTracer(tracerFlags) cc = activate CaptCtxRef.init(com, header) - vmState = BaseVMState.new(header, com, storeSlotHash = true).valueOr: return newJNull() + vmState = BaseVMState.new(header, com, com.db.baseTxFrame(), storeSlotHash = true).valueOr: return newJNull() stateDb = vmState.stateDB defer: cc.release() @@ -197,14 +197,12 @@ proc traceTransactionImpl( before.captureAccount(stateDb, miner, minerName) stateDb.persist() stateDiff["beforeRoot"] = %(stateDb.getStateRoot().toHex) - discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! stateCtx = CaptCtxRef.init(com, stateDb.getStateRoot()) let rc = vmState.processTransaction(tx, sender, header) gasUsed = if rc.isOk: rc.value else: 0 if idx.uint64 == txIndex: - discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! after.captureAccount(stateDb, sender, senderName) after.captureAccount(stateDb, recipient, recipientName) after.captureAccount(stateDb, miner, minerName) @@ -216,7 +214,7 @@ proc traceTransactionImpl( # internal transactions: let cx = activate stateCtx - ldgBefore = LedgerRef.init(com.db, storeSlotHash = true) + ldgBefore = LedgerRef.init(com.db.baseTxFrame(), storeSlotHash = true) defer: cx.release() for idx, acc in tracedAccountsPairs(tracerInst): @@ -249,7 +247,7 @@ proc dumpBlockStateImpl( # only need a stack dump when scanning for internal transaction address captureFlags = {DisableMemory, DisableStorage, EnableAccount} tracerInst = newLegacyTracer(captureFlags) - vmState = BaseVMState.new(header, com, tracerInst, storeSlotHash = true).valueOr: + vmState = BaseVMState.new(header, com, com.db.baseTxFrame(), tracerInst, storeSlotHash = true).valueOr: return newJNull() miner = vmState.coinbase() @@ -258,7 +256,7 @@ proc dumpBlockStateImpl( var before = newJArray() after = newJArray() - stateBefore = LedgerRef.init(com.db, storeSlotHash = true) + stateBefore = LedgerRef.init(com.db.baseTxFrame(), storeSlotHash = true) for idx, tx in blk.transactions: let sender = tx.recoverSender().expect("valid signature") @@ -313,7 +311,7 @@ proc traceBlockImpl( cc = activate CaptCtxRef.init(com, header) tracerInst = newLegacyTracer(tracerFlags) # Tracer needs a database where the reverse slot hash table has been set up - vmState = BaseVMState.new(header, com, tracerInst, storeSlotHash = true).valueOr: + vmState = BaseVMState.new(header, com, com.db.baseTxFrame(), tracerInst, storeSlotHash = true).valueOr: return newJNull() defer: cc.release() @@ -369,7 +367,7 @@ proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) = n[k.toHex(false)] = %v node["state"] = n -proc dumpReceipts*(chainDB: CoreDbRef, header: Header): JsonNode = +proc dumpReceipts*(chainDB: CoreDbTxRef, header: Header): JsonNode = chainDB.dumpReceiptsImpl header proc traceTransaction*( diff --git a/nimbus/transaction/call_evm.nim b/nimbus/transaction/call_evm.nim index 3c9b9f6abf..15c4ca30ba 100644 --- a/nimbus/transaction/call_evm.nim +++ b/nimbus/transaction/call_evm.nim @@ -35,12 +35,14 @@ proc rpcCallEvm*(args: TransactionArgs, gasLimit: 0.GasInt, ## ??? baseFeePerGas: Opt.none UInt256, ## ??? ) - let vmState = ? BaseVMState.new(topHeader, com) - let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) - var dbTx = com.db.ctx.txFrameBegin() + var dbTx = com.db.ctx.txFrameBegin(nil) # TODO use matching header frame defer: dbTx.dispose() # always dispose state changes + let vmState = ? BaseVMState.new(topHeader, com, dbTx) + let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) + + ok(runComputation(params, CallResult)) proc rpcCallEvm*(args: TransactionArgs, @@ -50,7 +52,7 @@ proc rpcCallEvm*(args: TransactionArgs, const globalGasCap = 0 # TODO: globalGasCap should configurable by user let params = ? toCallParams(vmState, args, globalGasCap, header.baseFeePerGas) - var dbTx = com.db.ctx.txFrameBegin() + var dbTx = com.db.ctx.txFrameBegin(nil) # TODO provide db tx defer: dbTx.dispose() # always dispose state changes ok(runComputation(params, CallResult)) @@ -65,7 +67,11 @@ proc rpcEstimateGas*(args: TransactionArgs, gasLimit: 0.GasInt, ## ??? baseFeePerGas: Opt.none UInt256, ## ??? ) - let vmState = ? BaseVMState.new(topHeader, com) + + var dbTx = com.db.ctx.txFrameBegin(nil) # TODO header state + defer: dbTx.dispose() # always dispose state changes + + let vmState = ? BaseVMState.new(topHeader, com, dbTx) let fork = vmState.fork let txGas = GasInt gasFees[fork][GasTransaction] # txGas always 21000, use constants? var params = ? toCallParams(vmState, args, gasCap, header.baseFeePerGas) @@ -75,8 +81,6 @@ proc rpcEstimateGas*(args: TransactionArgs, hi : GasInt = GasInt args.gas.get(0.Quantity) cap: GasInt - var dbTx = com.db.ctx.txFrameBegin() - defer: dbTx.dispose() # always dispose state changes # Determine the highest gas limit can be used during the estimation. if hi < txGas: diff --git a/nimbus/utils/debug.nim b/nimbus/utils/debug.nim index f360256e07..3cd30091f6 100644 --- a/nimbus/utils/debug.nim +++ b/nimbus/utils/debug.nim @@ -105,7 +105,7 @@ proc debug*(vms: BaseVMState): string = proc `$`(x: ChainId): string = $int(x) -proc `$`(acl: AccessList): string = +proc `$`(acl: transactions.AccessList): string = if acl.len == 0: return "zero length" diff --git a/tests/macro_assembler.nim b/tests/macro_assembler.nim index 25ec65a862..375c58dfcc 100644 --- a/tests/macro_assembler.nim +++ b/tests/macro_assembler.nim @@ -279,7 +279,7 @@ proc initVMEnv*(network: string): BaseVMState = gasLimit: 100_000 ) - BaseVMState.new(parent, header, com) + BaseVMState.new(parent, header, com, com.db.baseTxFrame()) proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: DebugCallResult): bool = let com = vmState.com @@ -326,7 +326,7 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: DebugCallR stateDB.persist() let - al = com.db.ctx.getAccounts() + al = com.db.baseTxFrame() accPath = keccak256(codeAddress.data) for kv in boa.storage: diff --git a/tests/replay/undump_blocks_gz.nim b/tests/replay/undump_blocks_gz.nim index d300e51f77..17d5679d06 100644 --- a/tests/replay/undump_blocks_gz.nim +++ b/tests/replay/undump_blocks_gz.nim @@ -43,7 +43,7 @@ proc dumpBlocksEndNl*: string = proc dumpBlocksListNl*(header: Header; body: BlockBody): string = dumpBlocksList(header, body) & "\n" -proc dumpBlocksBeginNl*(db: CoreDbRef; +proc dumpBlocksBeginNl*(db: CoreDbTxRef; headers: openArray[Header]): string = if headers[0].number == 1'u64: let @@ -57,7 +57,7 @@ proc dumpBlocksBeginNl*(db: CoreDbRef; result &= dumpBlocksBegin(headers) & "\n" -proc dumpBlocksNl*(db: CoreDbRef; headers: openArray[Header]; +proc dumpBlocksNl*(db: CoreDbTxRef; headers: openArray[Header]; bodies: openArray[BlockBody]): string = ## Add this below the line `transaction.commit()` in the function ## `p2p/chain/persist_blocks.persistBlocksImpl()`: diff --git a/tests/test_aristo.nim b/tests/test_aristo.nim index b2adcfb2a7..1740f1510f 100644 --- a/tests/test_aristo.nim +++ b/tests/test_aristo.nim @@ -24,7 +24,7 @@ import ./test_aristo/test_portal_proof, ./test_aristo/test_compute, ./test_aristo/[ - test_balancer, test_helpers, test_samples_xx, test_tx, + test_helpers, test_samples_xx, test_tx, undump_accounts, undump_storages] const diff --git a/tests/test_aristo/test_balancer.nim b/tests/test_aristo/test_balancer.nim deleted file mode 100644 index fffce5ab8d..0000000000 --- a/tests/test_aristo/test_balancer.nim +++ /dev/null @@ -1,307 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or -# distributed except according to those terms. - -## Aristo (aka Patricia) DB records distributed backend access test. -## - -import - eth/common, - results, - unittest2, - ../../nimbus/db/opts, - ../../nimbus/db/core_db/backend/aristo_rocksdb, - ../../nimbus/db/aristo/[ - aristo_check, - aristo_debug, - aristo_desc, - aristo_get, - aristo_persistent, - aristo_tx], - ../replay/xcheck, - ./test_helpers - -type - LeafQuartet = - array[0..3, seq[LeafTiePayload]] - - DbTriplet = - array[0..2, AristoDbRef] - -const - testRootVid = VertexID(2) - ## Need to reconfigure for the test, root ID 1 cannot be deleted as a trie - -# ------------------------------------------------------------------------------ -# Private debugging helpers -# ------------------------------------------------------------------------------ - -proc dump(pfx: string; dx: varargs[AristoDbRef]): string = - if 0 < dx.len: - result = "\n " - var - pfx = pfx - qfx = "" - if pfx.len == 0: - (pfx,qfx) = ("[","]") - elif 1 < dx.len: - pfx = pfx & "#" - for n in 0 ..< dx.len: - let n1 = n + 1 - result &= pfx - if 1 < dx.len: - result &= $n1 - result &= qfx & "\n " & dx[n].pp(backendOk=true) & "\n" - if n1 < dx.len: - result &= " ==========\n " - -proc dump(dx: varargs[AristoDbRef]): string {.used.} = - "".dump dx - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet = - ## ... - var collect: seq[seq[LeafTiePayload]] - - for w in td: - let lst = w.kvpLst.mapRootVid testRootVid - - if lst.len < 8: - if 2 < collect.len: - yield [collect[0], collect[1], collect[2], lst] - collect.setLen(0) - else: - collect.add lst - else: - if collect.len == 0: - let a = lst.len div 4 - yield [lst[0 ..< a], lst[a ..< 2*a], lst[2*a ..< 3*a], lst[3*a .. ^1]] - else: - if collect.len == 1: - let a = lst.len div 3 - yield [collect[0], lst[0 ..< a], lst[a ..< 2*a], lst[a .. ^1]] - elif collect.len == 2: - let a = lst.len div 2 - yield [collect[0], collect[1], lst[0 ..< a], lst[a .. ^1]] - else: - yield [collect[0], collect[1], collect[2], lst] - collect.setLen(0) - -proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[AristoDbRef,AristoError] = - let db = block: - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) - xCheckRc rc.error == 0: - result = err(rc.error) - rc.value()[0] - else: - AristoDbRef.init MemBackendRef - - # Set failed `xCheck()` error result - result = err(AristoError 1) - - # Fill backend - block: - let report = db.mergeList w[0] - if report.error != 0: - db.finish(eradicate=true) - xCheck report.error == 0 - let rc = db.persist() - xCheckRc rc.error == 0: - result = err(rc.error) - - let dx = db - -# ---------------------- - -proc cleanUp(dx: var AristoDbRef) = - if not dx.isNil: - dx.finish(eradicate=true) - dx.reset - -# ---------------------- - -proc isDbEq(a, b: LayerRef; db: AristoDbRef; noisy = true): bool = - ## Verify that argument filter `a` has the same effect on the - ## physical/unfiltered backend of `db` as argument filter `b`. - if a.isNil: - return b.isNil - if b.isNil: - return false - if unsafeAddr(a[]) != unsafeAddr(b[]): - if a.kMap.getOrVoid((testRootVid, testRootVid)) != - b.kMap.getOrVoid((testRootVid, testRootVid)) or - a.vTop != b.vTop: - return false - - # Void entries may differ unless on physical backend - var (aTab, bTab) = (a.sTab, b.sTab) - if aTab.len < bTab.len: - aTab.swap bTab - for (vid,aVtx) in aTab.pairs: - let bVtx = bTab.getOrVoid vid - bTab.del vid - - if aVtx != bVtx: - if aVtx.isValid and bVtx.isValid: - return false - # The valid one must match the backend data - let rc = db.getVtxUbe vid - if rc.isErr: - return false - let vtx = if aVtx.isValid: aVtx else: bVtx - if vtx != rc.value: - return false - - elif not vid.isValid and not bTab.hasKey vid: - let rc = db.getVtxUbe vid - if rc.isOk: - return false # Exists on backend but missing on `bTab[]` - elif rc.error != GetKeyNotFound: - return false # general error - - if 0 < bTab.len: - noisy.say "***", "not dbEq:", "bTabLen=", bTab.len - return false - - # Similar for `kMap[]` - var (aMap, bMap) = (a.kMap, b.kMap) - if aMap.len < bMap.len: - aMap.swap bMap - for (vid,aKey) in aMap.pairs: - let bKey = bMap.getOrVoid vid - bMap.del vid - - if aKey != bKey: - if aKey.isValid and bKey.isValid: - return false - # The valid one must match the backend data - let rc = db.getKeyUbe(vid, {}) - if rc.isErr: - return false - let key = if aKey.isValid: aKey else: bKey - if key != rc.value[0]: - return false - - elif not vid.isValid and not bMap.hasKey vid: - let rc = db.getKeyUbe(vid, {}) - if rc.isOk: - return false # Exists on backend but missing on `bMap[]` - elif rc.error != GetKeyNotFound: - return false # general error - - if 0 < bMap.len: - noisy.say "***", "not dbEq:", " bMapLen=", bMap.len - return false - - true - -# ---------------------- - -proc checkBeOk( - dx: AristoDbRef; - forceCache = false; - noisy = true; - ): bool = - ## .. - let rc = dx.checkBE() - xCheckRc rc.error == (0,0): - noisy.say "***", "db checkBE failed" - true - -# ------------------------------------------------------------------------------ -# Public test function -# ------------------------------------------------------------------------------ - -proc testBalancer*( - noisy: bool; - list: openArray[ProofTrieData]; - rdbPath: string; # Rocks DB storage directory - ): bool = - var n = 0 - for w in list.quadripartite: - n.inc - - # Resulting clause (11) filters from `aristo/README.md` example - # which will be used in the second part of the tests - var - c11Filter1 = LayerRef(nil) - c11Filter3 = LayerRef(nil) - - # Work through clauses (8)..(11) from `aristo/README.md` example - block: - - # Clause (8) from `aristo/README.md` example - var - dx = block: - let rc = dbTriplet(w, rdbPath) - xCheckRc rc.error == 0 - rc.value - db1 = dx - defer: - dx.cleanUp() - - when false: # or true: - noisy.say "*** testDistributedAccess (1)", "n=", n # , dx.dump - - # Clause (9) from `aristo/README.md` example - block: - let rc = db1.persist() - xCheckRc rc.error == 0 - xCheck db1.balancer == LayerRef(nil) - - # Check/verify backends - block: - let ok = dx.checkBeOk(noisy=noisy) - xCheck ok: - noisy.say "*** testDistributedAccess (4)", "n=", n - - # Capture filters from clause (11) - c11Filter1 = db1.balancer - - # Clean up - dx.cleanUp() - - # ---------- - - # Work through clauses (12)..(15) from `aristo/README.md` example - block: - var - dy = block: - let rc = dbTriplet(w, rdbPath) - xCheckRc rc.error == 0 - rc.value - db1 = dy - defer: - dy.cleanUp() - - # Clause (14) from `aristo/README.md` check - let c11Fil1_eq_db1RoFilter = c11Filter1.isDbEq(db1.balancer, db1, noisy) - xCheck c11Fil1_eq_db1RoFilter: - noisy.say "*** testDistributedAccess (7)", "n=", n, - "db1".dump(db1), - "" - - # Check/verify backends - block: - let ok = dy.checkBeOk(noisy=noisy) - xCheck ok - - when false: # or true: - noisy.say "*** testDistributedAccess (9)", "n=", n # , dy.dump - - true - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/tests/test_aristo/test_compute.nim b/tests/test_aristo/test_compute.nim index 239f0222ba..faf63dfdd2 100644 --- a/tests/test_aristo/test_compute.nim +++ b/tests/test_aristo/test_compute.nim @@ -79,19 +79,20 @@ suite "Aristo compute": test "Add and delete entries " & $n: let db = AristoDbRef.init VoidBackendRef + txFrame = db.txRef root = VertexID(1) for (k, v, r) in sample: checkpoint("k = " & k.toHex & ", v = " & $v) check: - db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) + txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) # Check state against expected value - let w = db.computeKey((root, root)).expect("no errors") + let w = txFrame.computeKey((root, root)).expect("no errors") check r == w.to(Hash32) - let rc = db.check + let rc = txFrame.check check rc == typeof(rc).ok() # Reverse run deleting entries @@ -103,29 +104,30 @@ suite "Aristo compute": deletedKeys.incl k # Check state against expected value - let w = db.computeKey((root, root)).value.to(Hash32) + let w = txFrame.computeKey((root, root)).value.to(Hash32) check r == w check: - db.deleteAccountRecord(k).isOk + txFrame.deleteAccountRecord(k).isOk - let rc = db.check + let rc = txFrame.check check rc == typeof(rc).ok() test "Pre-computed key": # TODO use mainnet genesis in this test? let db = AristoDbRef.init MemBackendRef + txFrame = db.txRef root = VertexID(1) for (k, v, r) in samples[^1]: check: - db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) + txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) check db.txPersist(1).isOk() - check db.computeKeys(root).isOk() + check txFrame.computeKeys(root).isOk() - let w = db.computeKey((root, root)).value.to(Hash32) + let w = txFrame.computeKey((root, root)).value.to(Hash32) check w == samples[^1][^1][2] diff --git a/tests/test_aristo/test_helpers.nim b/tests/test_aristo/test_helpers.nim index 7f73bb17cc..6c06c2ea53 100644 --- a/tests/test_aristo/test_helpers.nim +++ b/tests/test_aristo/test_helpers.nim @@ -49,7 +49,7 @@ func to(a: NodeKey; T: type PathID): T = proc pp*( w: ProofTrieData; rootID: VertexID; - db: AristoDbRef; + db: AristoTxRef; indent = 4; ): string = let @@ -65,13 +65,13 @@ proc pp*( result &= "])" proc pp*(w: ProofTrieData; indent = 4): string = - var db = AristoDbRef() + var db = AristoTxRef() w.pp(VertexID(1), db, indent) proc pp*( w: openArray[ProofTrieData]; rootID: VertexID; - db: AristoDbRef; + db: AristoTxRef; indent = 4): string = let pfx = indent.toPfx "[" & w.mapIt(it.pp(rootID, db, indent + 1)).join("," & pfx & " ") & "]" @@ -80,7 +80,7 @@ proc pp*(w: openArray[ProofTrieData]; indent = 4): string = let pfx = indent.toPfx "[" & w.mapIt(it.pp(indent + 1)).join("," & pfx & " ") & "]" -proc pp*(ltp: LeafTiePayload; db: AristoDbRef): string = +proc pp*(ltp: LeafTiePayload; db: AristoTxRef): string = "(" & ltp.leafTie.pp(db) & "," & ltp.payload.pp(db) & ")" # ---------- @@ -208,11 +208,6 @@ proc schedStow*( db: AristoDbRef; # Database ): Result[void,AristoError] = ## Context based scheduled persistent/non-persistent storage. - let - layersMeter = db.nLayersVtx() + db.nLayersKey() - filterMeter = if db.balancer.isNil: 0 - else: db.balancer.sTab.len + db.balancer.kMap.len - persistent = MaxFilterBulk < max(layersMeter, filterMeter) db.persist() # ------------------ diff --git a/tests/test_aristo/test_merge_proof.nim b/tests/test_aristo/test_merge_proof.nim index fad1045907..f172e9dd0c 100644 --- a/tests/test_aristo/test_merge_proof.nim +++ b/tests/test_aristo/test_merge_proof.nim @@ -34,7 +34,7 @@ const proc innerCleanUp(ps: var PartStateRef) = if not ps.isNil: - ps.db.finish(eradicate=true) + ps.db.db.finish(eradicate=true) ps = PartStateRef(nil) # ----------------------- @@ -44,43 +44,43 @@ proc saveToBackend( noisy: bool; debugID: int; ): bool = - var db = tx.to(AristoDbRef) + # var db = tx.to(AristoDbRef) - # Verify context: nesting level must be 2 (i.e. two transactions) - xCheck tx.level == 2 + # # Verify context: nesting level must be 2 (i.e. two transactions) + # xCheck tx.level == 2 - # Commit and hashify the current layer - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and hashify the current layer + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckRc rc.error == 0 - tx = rc.value + # block: + # let rc = db.txFrameTop() + # xCheckRc rc.error == 0 + # tx = rc.value - # Verify context: nesting level must be 1 (i.e. one transaction) - xCheck tx.level == 1 + # # Verify context: nesting level must be 1 (i.e. one transaction) + # xCheck tx.level == 1 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0) - # Commit and save to backend - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and save to backend + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckErr rc.value.level < 0 # force error + # block: + # let rc = db.txFrameTop() + # xCheckErr rc.value.level < 0 # force error - block: - let rc = db.schedStow() - xCheckRc rc.error == 0 + # block: + # let rc = db.schedStow() + # xCheckRc rc.error == 0 - # Update layers to original level - tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value + # # Update layers to original level + # tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value true diff --git a/tests/test_aristo/test_portal_proof.nim b/tests/test_aristo/test_portal_proof.nim index 66591a60b4..3658a6109b 100644 --- a/tests/test_aristo/test_portal_proof.nim +++ b/tests/test_aristo/test_portal_proof.nim @@ -52,7 +52,7 @@ proc createPartDb(ps: PartStateRef; data: seq[seq[byte]]; info: static[string]) proc preLoadAristoDb(jKvp: JsonNode): PartStateRef = const info = "preLoadAristoDb" - let ps = PartStateRef.init AristoDbRef.init() + let ps = PartStateRef.init AristoDbRef.init().txRef # Collect rlp-encodede node blobs var proof: seq[seq[byte]] @@ -199,7 +199,7 @@ proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.dep let chain = proof.chain # Create another partial database from tree - let pq = PartStateRef.init AristoDbRef.init() + let pq = PartStateRef.init AristoDbRef.init().txRef pq.createPartDb(chain, info) # Create the same proof again which must result into the same as before @@ -228,7 +228,7 @@ proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.dep chain = @[ext] & tail # Create a third partial database from modified proof - let pq = PartStateRef.init AristoDbRef.init() + let pq = PartStateRef.init AristoDbRef.init().txRef pq.createPartDb(chain, info) # Re-create proof again diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index 93fe39fc94..341e8dd877 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -87,35 +87,35 @@ proc rand(td: var PrngDesc; top: int): int = # ----------------------- -proc randomisedLeafs( - db: AristoDbRef; - ltys: HashSet[LeafTie]; - td: var PrngDesc; - ): Result[seq[(LeafTie,RootedVertexID)],(VertexID,AristoError)] = - var lvp: seq[(LeafTie,RootedVertexID)] - for lty in ltys: - var hike: Hike - ?lty.hikeUp(db, Opt.none(VertexRef), hike) - lvp.add (lty,(hike.root, hike.legs[^1].wp.vid)) - - var lvp2 = lvp.sorted( - cmp = proc(a,b: (LeafTie,RootedVertexID)): int = cmp(a[0],b[0])) - if 2 < lvp2.len: - for n in 0 ..< lvp2.len-1: - let r = n + td.rand(lvp2.len - n) - lvp2[n].swap lvp2[r] - ok lvp2 - -proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} = - ## Defer action - if not db.isNil: - let rx = db.txFrameTop() - if rx.isOk: - let rc = rx.value.collapse(commit=false) - xCheckRc rc.error == 0 - db.finish(eradicate=true) - db = AristoDbRef(nil) - true +# proc randomisedLeafs( +# db: AristoTxRef; +# ltys: HashSet[LeafTie]; +# td: var PrngDesc; +# ): Result[seq[(LeafTie,RootedVertexID)],(VertexID,AristoError)] = +# var lvp: seq[(LeafTie,RootedVertexID)] +# for lty in ltys: +# var hike: Hike +# ?lty.hikeUp(db, Opt.none(VertexRef), hike) +# lvp.add (lty,(hike.root, hike.legs[^1].wp.vid)) + +# var lvp2 = lvp.sorted( +# cmp = proc(a,b: (LeafTie,RootedVertexID)): int = cmp(a[0],b[0])) +# if 2 < lvp2.len: +# for n in 0 ..< lvp2.len-1: +# let r = n + td.rand(lvp2.len - n) +# lvp2[n].swap lvp2[r] +# ok lvp2 + +# proc innerCleanUp(db: var AristoTxRef): bool {.discardable.} = +# ## Defer action +# if not db.isNil: +# let rx = db.txFrameTop() +# if rx.isOk: +# let rc = rx.value.collapse(commit=false) +# xCheckRc rc.error == 0 +# db.finish(eradicate=true) +# db = AristoDbRef(nil) +# true # -------------------------------- @@ -127,50 +127,50 @@ proc saveToBackend( ): bool = var db = tx.to(AristoDbRef) - # Verify context: nesting level must be 2 (i.e. two transactions) - xCheck tx.level == 2 + # # Verify context: nesting level must be 2 (i.e. two transactions) + # xCheck tx.level == 2 - block: - let rc = db.checkTop() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkTop() + # xCheckRc rc.error == (0,0) - # Commit and hashify the current layer - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and hashify the current layer + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckRc rc.error == 0 - tx = rc.value + # block: + # let rc = db.txFrameTop() + # xCheckRc rc.error == 0 + # tx = rc.value - # Verify context: nesting level must be 1 (i.e. one transaction) - xCheck tx.level == 1 + # # Verify context: nesting level must be 1 (i.e. one transaction) + # xCheck tx.level == 1 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0) + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0) - # Commit and save to backend - block: - let rc = tx.commit() - xCheckRc rc.error == 0 + # # Commit and save to backend + # block: + # let rc = tx.commit() + # xCheckRc rc.error == 0 - block: - let rc = db.txFrameTop() - xCheckErr rc.value.level < 0 # force error + # block: + # let rc = db.txFrameTop() + # xCheckErr rc.value.level < 0 # force error - block: - let rc = db.schedStow() - xCheckRc rc.error == 0 + # block: + # let rc = db.schedStow() + # xCheckRc rc.error == 0 - block: - let rc = db.checkBE() - xCheckRc rc.error == (0,0): - noisy.say "***", "saveToBackend (8)", " debugID=", debugID + # block: + # let rc = db.checkBE() + # xCheckRc rc.error == (0,0): + # noisy.say "***", "saveToBackend (8)", " debugID=", debugID - # Update layers to original level - tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value + # # Update layers to original level + # tx = db.txFrameBegin().value.to(AristoDbRef).txFrameBegin().value true @@ -184,26 +184,26 @@ proc fwdWalkVerify( ): bool = let nLeafs = leftOver.len - var - leftOver = leftOver - last = LeafTie() - n = 0 - for (key,_) in db.rightPairs low(LeafTie,root): - xCheck key in leftOver: - noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID - leftOver.excl key - last = key - n.inc - - # Verify stop condition - if last.root == VertexID(0): - last = low(LeafTie,root) - elif last != high(LeafTie,root): - last = last.next - let rc = last.right db - xCheck rc.isErr - xCheck rc.error[1] == NearbyBeyondRange - xCheck n == nLeafs + # var + # leftOver = leftOver + # last = LeafTie() + # n = 0 + # for (key,_) in db.rightPairs low(LeafTie,root): + # xCheck key in leftOver: + # noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID + # leftOver.excl key + # last = key + # n.inc + + # # Verify stop condition + # if last.root == VertexID(0): + # last = low(LeafTie,root) + # elif last != high(LeafTie,root): + # last = last.next + # let rc = last.right db + # xCheck rc.isErr + # xCheck rc.error[1] == NearbyBeyondRange + # xCheck n == nLeafs true @@ -216,26 +216,26 @@ proc revWalkVerify( ): bool = let nLeafs = leftOver.len - var - leftOver = leftOver - last = LeafTie() - n = 0 - for (key,_) in db.leftPairs high(LeafTie,root): - xCheck key in leftOver: - noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID - leftOver.excl key - last = key - n.inc - - # Verify stop condition - if last.root == VertexID(0): - last = high(LeafTie,root) - elif last != low(LeafTie,root): - last = last.prev - let rc = last.left db - xCheck rc.isErr - xCheck rc.error[1] == NearbyBeyondRange - xCheck n == nLeafs + # var + # leftOver = leftOver + # last = LeafTie() + # n = 0 + # for (key,_) in db.leftPairs high(LeafTie,root): + # xCheck key in leftOver: + # noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID + # leftOver.excl key + # last = key + # n.inc + + # # Verify stop condition + # if last.root == VertexID(0): + # last = high(LeafTie,root) + # elif last != low(LeafTie,root): + # last = last.prev + # let rc = last.left db + # xCheck rc.isErr + # xCheck rc.error[1] == NearbyBeyondRange + # xCheck n == nLeafs true diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index f0a28d4536..9d74901fba 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -58,8 +58,8 @@ proc parseEnv(node: JsonNode): TestEnv = result.network = node["network"].getStr result.pre = node["pre"] -proc rootExists(db: CoreDbRef; root: Hash32): bool = - let state = db.ctx.getAccounts().getStateRoot().valueOr: +proc rootExists(db: CoreDbTxRef; root: Hash32): bool = + let state = db.getStateRoot().valueOr: return false state == root @@ -67,22 +67,22 @@ proc executeCase(node: JsonNode): bool = let env = parseEnv(node) memDB = newCoreDbRef DefaultDbMemory - stateDB = LedgerRef.init(memDB) + stateDB = LedgerRef.init(memDB.baseTxFrame()) config = getChainConfig(env.network) com = CommonRef.new(memDB, nil, config) setupStateDB(env.pre, stateDB) stateDB.persist() - com.db.persistHeaderAndSetHead(env.genesisHeader).isOkOr: + com.db.baseTxFrame().persistHeaderAndSetHead(env.genesisHeader).isOkOr: debugEcho "Failed to put genesis header into database: ", error return false - var c = ForkedChainRef.init(com) + var c = ForkedChainRef.init(com) if c.latestHash != env.genesisHeader.blockHash: debugEcho "Genesis block hash in database is different with expected genesis block hash" return false - + var lastStateRoot = env.genesisHeader.stateRoot for blk in env.blocks: let res = c.importBlock(blk.blk) @@ -100,14 +100,14 @@ proc executeCase(node: JsonNode): bool = c.forkChoice(env.lastBlockHash, env.lastBlockHash).isOkOr: debugEcho error return false - + let headHash = c.latestHash if headHash != env.lastBlockHash: debugEcho "lastestBlockHash mismatch, get: ", headHash, " expect: ", env.lastBlockHash return false - if not memDB.rootExists(lastStateRoot): + if not c.txFrame(headHash).rootExists(lastStateRoot): debugEcho "Last stateRoot not exists" return false diff --git a/tests/test_coredb/test_chainsync.nim b/tests/test_coredb/test_chainsync.nim index b2ee27f438..0ff4de05c5 100644 --- a/tests/test_coredb/test_chainsync.nim +++ b/tests/test_coredb/test_chainsync.nim @@ -147,7 +147,7 @@ proc test_chainSync*( let sayBlocks = 900'u64 chain = com.newChain() - blockOnDb = com.db.getSavedStateBlockNumber() + blockOnDb = com.db.baseTxFrame().getSavedStateBlockNumber() lastBlock = max(1, numBlocks).BlockNumber noisy.initLogging com @@ -203,7 +203,7 @@ proc test_chainSync*( for w in files.undumpBlocks(least = start): let (fromBlock, toBlock) = (w[0].header.number, w[^1].header.number) if fromBlock == 0'u64: - xCheck w[0].header == com.db.getBlockHeader(0'u64).expect("block header exists") + xCheck w[0].header == com.db.baseTxFrame().getBlockHeader(0'u64).expect("block header exists") continue # Process groups of blocks ... diff --git a/tests/test_evm_support.nim b/tests/test_evm_support.nim index ab191372a1..506dee79b4 100644 --- a/tests/test_evm_support.nim +++ b/tests/test_evm_support.nim @@ -355,6 +355,7 @@ proc runTestOverflow() = header, header, com, + com.db.baseTxFrame() ) s.stateDB.setCode(codeAddress, @data) diff --git a/tests/test_forked_chain.nim b/tests/test_forked_chain.nim index dd74d3c0e2..514c0090b2 100644 --- a/tests/test_forked_chain.nim +++ b/tests/test_forked_chain.nim @@ -55,7 +55,7 @@ proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block): Block = amount: 1, ) - let ledger = LedgerRef.init(com.db) + let ledger = LedgerRef.init(com.db.baseTxFrame()) for wd in wds: ledger.addBalance(wd.address, wd.weiAmount) @@ -91,440 +91,443 @@ proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block, extraData: b blk proc headHash(c: CommonRef): Hash32 = - c.db.getCanonicalHead().expect("canonical head exists").blockHash + # TODO c.db.getCanonicalHead().expect("canonical head exists").blockHash + discard func blockHash(x: Block): Hash32 = x.header.blockHash proc wdWritten(com: CommonRef, blk: Block): int = - if blk.header.withdrawalsRoot.isSome: - com.db.getWithdrawals(blk.header.withdrawalsRoot.get). - expect("withdrawals exists").len - else: - 0 + # if blk.header.withdrawalsRoot.isSome: + # com.db.getWithdrawals(blk.header.withdrawalsRoot.get). + # expect("withdrawals exists").len + # else: + # 0 + discard # TODO proc forkedChainMain*() = - suite "ForkedChainRef tests": - var env = setupEnv() - let - cc = env.newCom - genesisHash = cc.genesisHeader.blockHash - genesis = Block.init(cc.genesisHeader, BlockBody()) - - let - blk1 = cc.makeBlk(1, genesis) - blk2 = cc.makeBlk(2, blk1) - blk3 = cc.makeBlk(3, blk2) - - dbTx = cc.db.ctx.txFrameBegin() - blk4 = cc.makeBlk(4, blk3) - blk5 = cc.makeBlk(5, blk4) - blk6 = cc.makeBlk(6, blk5) - blk7 = cc.makeBlk(7, blk6) - - dbTx.dispose() - - let - B4 = cc.makeBlk(4, blk3, 1.byte) - B5 = cc.makeBlk(5, B4) - B6 = cc.makeBlk(6, B5) - B7 = cc.makeBlk(7, B6) - - test "newBase == oldBase": - const info = "newBase == oldBase" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - - # same header twice - check chain.importBlock(blk1).isOk - - check chain.importBlock(blk2).isOk - - check chain.importBlock(blk3).isOk - check chain.validate info & " (1)" - - # no parent - check chain.importBlock(blk5).isErr - - check com.headHash == genesisHash - check chain.latestHash == blk3.blockHash - check chain.validate info & " (2)" - - # finalized > head -> error - check chain.forkChoice(blk1.blockHash, blk3.blockHash).isErr - check chain.validate info & " (3)" - - # blk4 is not part of chain - check chain.forkChoice(blk4.blockHash, blk2.blockHash).isErr - - # finalized > head -> error - check chain.forkChoice(blk1.blockHash, blk2.blockHash).isErr - - # blk4 is not part of chain - check chain.forkChoice(blk2.blockHash, blk4.blockHash).isErr - - # finalized < head -> ok - check chain.forkChoice(blk2.blockHash, blk1.blockHash).isOk - check com.headHash == blk2.blockHash - check chain.latestHash == blk2.blockHash - check chain.validate info & " (7)" - - # finalized == head -> ok - check chain.forkChoice(blk2.blockHash, blk2.blockHash).isOk - check com.headHash == blk2.blockHash - check chain.latestHash == blk2.blockHash - check chain.validate info & " (8)" - - # no baggage written - check com.wdWritten(blk1) == 0 - check com.wdWritten(blk2) == 0 - check chain.validate info & " (9)" - - test "newBase == cursor": - const info = "newBase == cursor" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - # newbase == cursor - check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - - check com.wdWritten(blk7) == 0 - - # head - baseDistance must been finalized - check com.wdWritten(blk4) == 4 - # make sure aristo not wiped out baggage - check com.wdWritten(blk3) == 3 - check chain.validate info & " (9)" - - test "newBase between oldBase and cursor": - const info = "newBase between oldBase and cursor" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - - check com.wdWritten(blk6) == 0 - check com.wdWritten(blk7) == 0 - - # head - baseDistance must been finalized - check com.wdWritten(blk4) == 4 - # make sure aristo not wiped out baggage - check com.wdWritten(blk3) == 3 - check chain.validate info & " (9)" - - test "newBase == oldBase, fork and stay on that fork": - const info = "newBase == oldBase, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.validate info & " (9)" - - test "newBase == cursor, fork and stay on that fork": - const info = "newBase == cursor, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(B4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B6.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.validate info & " (9)" - - test "newBase on shorter canonical arc, discard arc with oldBase": - const info = "newBase on shorter canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber >= B4.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "newBase on curbed non-canonical arc": - const info = "newBase on curbed non-canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 5) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber > 0 - check chain.baseNumber < B4.header.number - check chain.cursorHeads.len == 2 - check chain.validate info & " (9)" - - test "newBase == oldBase, fork and return to old chain": - const info = "newBase == oldBase, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.validate info & " (9)" - - test "newBase == cursor, fork and return to old chain": - const info = "newBase == cursor, fork .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.validate info & " (9)" - - test "newBase on shorter canonical arc, discard arc with oldBase" & - " (ign dup block)": - const info = "newBase on shorter canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - - check chain.importBlock(blk4).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(B7.blockHash, B5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == B7.blockHash - check chain.latestHash == B7.blockHash - check chain.baseNumber >= B4.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "newBase on longer canonical arc, discard arc with oldBase": - const info = "newBase on longer canonical .." - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - check com.headHash == blk7.blockHash - check chain.latestHash == blk7.blockHash - check chain.baseNumber > 0 - check chain.baseNumber < blk5.header.number - check chain.cursorHeads.len == 1 - check chain.validate info & " (9)" - - test "headerByNumber": - const info = "headerByNumber" - let com = env.newCom() - - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - - check chain.importBlock(B4).isOk - check chain.importBlock(B5).isOk - check chain.importBlock(B6).isOk - check chain.importBlock(B7).isOk - check chain.validate info & " (1)" - - check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk - check chain.validate info & " (2)" - - # cursor - check chain.headerByNumber(8).isErr - check chain.headerByNumber(7).expect("OK").number == 7 - check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash - - # from db - check chain.headerByNumber(3).expect("OK").number == 3 - check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash - - # base - check chain.headerByNumber(4).expect("OK").number == 4 - check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash - - # from cache - check chain.headerByNumber(5).expect("OK").number == 5 - check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash - check chain.validate info & " (9)" - - test "Import after Replay Segment": - const info = "Import after Replay Segment" - let com = env.newCom() - var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) - - check chain.importBlock(blk1).isOk - check chain.importBlock(blk2).isOk - check chain.importBlock(blk3).isOk - check chain.importBlock(blk4).isOk - check chain.importBlock(blk5).isOk - check chain.validate info & " (1)" - - chain.replaySegment(blk2.header.blockHash) - chain.replaySegment(blk5.header.blockHash) - check chain.validate info & " (2)" - - check chain.importBlock(blk6).isOk - check chain.importBlock(blk7).isOk - check chain.validate info & " (9)" + discard # TODO +# suite "ForkedChainRef tests": +# var env = setupEnv() +# let +# cc = env.newCom +# genesisHash = cc.genesisHeader.blockHash +# genesis = Block.init(cc.genesisHeader, BlockBody()) + +# let +# blk1 = cc.makeBlk(1, genesis) +# blk2 = cc.makeBlk(2, blk1) +# blk3 = cc.makeBlk(3, blk2) + +# dbTx = cc.db.ctx.txFrameBegin() +# blk4 = cc.makeBlk(4, blk3) +# blk5 = cc.makeBlk(5, blk4) +# blk6 = cc.makeBlk(6, blk5) +# blk7 = cc.makeBlk(7, blk6) + +# dbTx.dispose() + +# let +# B4 = cc.makeBlk(4, blk3, 1.byte) +# B5 = cc.makeBlk(5, B4) +# B6 = cc.makeBlk(6, B5) +# B7 = cc.makeBlk(7, B6) + +# test "newBase == oldBase": +# const info = "newBase == oldBase" +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader) +# check chain.importBlock(blk1).isOk + +# # same header twice +# check chain.importBlock(blk1).isOk + +# check chain.importBlock(blk2).isOk + +# check chain.importBlock(blk3).isOk +# check chain.validate info & " (1)" + +# # no parent +# check chain.importBlock(blk5).isErr + +# check com.headHash == genesisHash +# check chain.latestHash == blk3.blockHash +# check chain.validate info & " (2)" + +# # finalized > head -> error +# check chain.forkChoice(blk1.blockHash, blk3.blockHash).isErr +# check chain.validate info & " (3)" + +# # blk4 is not part of chain +# check chain.forkChoice(blk4.blockHash, blk2.blockHash).isErr + +# # finalized > head -> error +# check chain.forkChoice(blk1.blockHash, blk2.blockHash).isErr + +# # blk4 is not part of chain +# check chain.forkChoice(blk2.blockHash, blk4.blockHash).isErr + +# # finalized < head -> ok +# check chain.forkChoice(blk2.blockHash, blk1.blockHash).isOk +# check com.headHash == blk2.blockHash +# check chain.latestHash == blk2.blockHash +# check chain.validate info & " (7)" + +# # finalized == head -> ok +# check chain.forkChoice(blk2.blockHash, blk2.blockHash).isOk +# check com.headHash == blk2.blockHash +# check chain.latestHash == blk2.blockHash +# check chain.validate info & " (8)" + +# # no baggage written +# check com.wdWritten(blk1) == 0 +# check com.wdWritten(blk2) == 0 +# check chain.validate info & " (9)" + +# test "newBase == cursor": +# const info = "newBase == cursor" +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(blk4).isOk +# check chain.validate info & " (1)" + +# # newbase == cursor +# check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == blk7.blockHash +# check chain.latestHash == blk7.blockHash + +# check com.wdWritten(blk7) == 0 + +# # head - baseDistance must been finalized +# check com.wdWritten(blk4) == 4 +# # make sure aristo not wiped out baggage +# check com.wdWritten(blk3) == 3 +# check chain.validate info & " (9)" + +# test "newBase between oldBase and cursor": +# const info = "newBase between oldBase and cursor" +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(blk7.blockHash, blk6.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == blk7.blockHash +# check chain.latestHash == blk7.blockHash + +# check com.wdWritten(blk6) == 0 +# check com.wdWritten(blk7) == 0 + +# # head - baseDistance must been finalized +# check com.wdWritten(blk4) == 4 +# # make sure aristo not wiped out baggage +# check com.wdWritten(blk3) == 3 +# check chain.validate info & " (9)" + +# test "newBase == oldBase, fork and stay on that fork": +# const info = "newBase == oldBase, fork .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(B7.blockHash, B5.blockHash).isOk + +# check com.headHash == B7.blockHash +# check chain.latestHash == B7.blockHash +# check chain.validate info & " (9)" + +# test "newBase == cursor, fork and stay on that fork": +# const info = "newBase == cursor, fork .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk + +# check chain.importBlock(B4).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(B7.blockHash, B6.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == B7.blockHash +# check chain.latestHash == B7.blockHash +# check chain.validate info & " (9)" + +# test "newBase on shorter canonical arc, discard arc with oldBase": +# const info = "newBase on shorter canonical .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(B7.blockHash, B5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == B7.blockHash +# check chain.latestHash == B7.blockHash +# check chain.baseNumber >= B4.header.number +# check chain.cursorHeads.len == 1 +# check chain.validate info & " (9)" + +# test "newBase on curbed non-canonical arc": +# const info = "newBase on curbed non-canonical .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 5) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(B7.blockHash, B5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == B7.blockHash +# check chain.latestHash == B7.blockHash +# check chain.baseNumber > 0 +# check chain.baseNumber < B4.header.number +# check chain.cursorHeads.len == 2 +# check chain.validate info & " (9)" + +# test "newBase == oldBase, fork and return to old chain": +# const info = "newBase == oldBase, fork .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == blk7.blockHash +# check chain.latestHash == blk7.blockHash +# check chain.validate info & " (9)" + +# test "newBase == cursor, fork and return to old chain": +# const info = "newBase == cursor, fork .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk + +# check chain.importBlock(blk4).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == blk7.blockHash +# check chain.latestHash == blk7.blockHash +# check chain.validate info & " (9)" + +# test "newBase on shorter canonical arc, discard arc with oldBase" & +# " (ign dup block)": +# const info = "newBase on shorter canonical .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk + +# check chain.importBlock(blk4).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(B7.blockHash, B5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == B7.blockHash +# check chain.latestHash == B7.blockHash +# check chain.baseNumber >= B4.header.number +# check chain.cursorHeads.len == 1 +# check chain.validate info & " (9)" + +# test "newBase on longer canonical arc, discard arc with oldBase": +# const info = "newBase on longer canonical .." +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk +# check chain.validate info & " (2)" + +# check com.headHash == blk7.blockHash +# check chain.latestHash == blk7.blockHash +# check chain.baseNumber > 0 +# check chain.baseNumber < blk5.header.number +# check chain.cursorHeads.len == 1 +# check chain.validate info & " (9)" + +# test "headerByNumber": +# const info = "headerByNumber" +# let com = env.newCom() + +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk + +# check chain.importBlock(B4).isOk +# check chain.importBlock(B5).isOk +# check chain.importBlock(B6).isOk +# check chain.importBlock(B7).isOk +# check chain.validate info & " (1)" + +# check chain.forkChoice(blk7.blockHash, blk5.blockHash).isOk +# check chain.validate info & " (2)" + +# # cursor +# check chain.headerByNumber(8).isErr +# check chain.headerByNumber(7).expect("OK").number == 7 +# check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash + +# # from db +# check chain.headerByNumber(3).expect("OK").number == 3 +# check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash + +# # base +# check chain.headerByNumber(4).expect("OK").number == 4 +# check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash + +# # from cache +# check chain.headerByNumber(5).expect("OK").number == 5 +# check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash +# check chain.validate info & " (9)" + +# test "Import after Replay Segment": +# const info = "Import after Replay Segment" +# let com = env.newCom() +# var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) + +# check chain.importBlock(blk1).isOk +# check chain.importBlock(blk2).isOk +# check chain.importBlock(blk3).isOk +# check chain.importBlock(blk4).isOk +# check chain.importBlock(blk5).isOk +# check chain.validate info & " (1)" + +# chain.replaySegment(blk2.header.blockHash) +# chain.replaySegment(blk5.header.blockHash) +# check chain.validate info & " (2)" + +# check chain.importBlock(blk6).isOk +# check chain.importBlock(blk7).isOk +# check chain.validate info & " (9)" when isMainModule: forkedChainMain() diff --git a/tests/test_generalstate_json.nim b/tests/test_generalstate_json.nim index 1d32676d82..4dc4a3c1dc 100644 --- a/tests/test_generalstate_json.nim +++ b/tests/test_generalstate_json.nim @@ -96,6 +96,7 @@ proc testFixtureIndexes(ctx: var TestCtx, testStatusIMPL: var TestStatus) = parent = parent, header = ctx.header, com = com, + txFRame = com.db.baseTxFrame(), tracer = tracer, storeSlotHash = ctx.trace, ) diff --git a/tests/test_genesis.nim b/tests/test_genesis.nim index a0bc7f0d2f..b0cfb9da1e 100644 --- a/tests/test_genesis.nim +++ b/tests/test_genesis.nim @@ -35,7 +35,7 @@ proc proofOfStake(params: NetworkParams): bool = networkId = params.config.chainId.NetworkId, params = params) let header = com.genesisHeader - com.proofOfStake(header) + com.proofOfStake(header, com.db.baseTxFrame()) proc genesisTest() = suite "Genesis": @@ -71,7 +71,7 @@ proc customGenesisTest() = let genesisHash = hash32"a28d8d73e087a01d09d8cb806f60863652f30b6b6dfa4e0157501ff07d422399" check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash - check com.proofOfStake(com.genesisHeader) == false + check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false test "Devnet5.json (aka Kiln in all but chainId and TTD)": var cg: NetworkParams @@ -81,7 +81,7 @@ proc customGenesisTest() = let genesisHash = hash32"51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8" check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash - check com.proofOfStake(com.genesisHeader) == false + check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false test "Mainnet shadow fork 1": var cg: NetworkParams @@ -93,7 +93,7 @@ proc customGenesisTest() = check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash check com.ttd.get == ttd - check com.proofOfStake(com.genesisHeader) == false + check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false test "Geth shadow fork 1": # parse using geth format should produce the same result with nimbus format @@ -106,7 +106,7 @@ proc customGenesisTest() = check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash check com.ttd.get == ttd - check com.proofOfStake(com.genesisHeader) == false + check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false check cg.config.mergeNetsplitBlock.isSome check cg.config.mergeNetsplitBlock.get == 14660963.BlockNumber diff --git a/tests/test_getproof_json.nim b/tests/test_getproof_json.nim index dc26ca8265..e46559655f 100644 --- a/tests/test_getproof_json.nim +++ b/tests/test_getproof_json.nim @@ -129,9 +129,9 @@ proc getProofJsonMain*() = let accounts = getGenesisAlloc("tests" / "customgenesis" / file) coreDb = newCoreDbRef(DefaultDbMemory) - accountsCache = LedgerRef.init(coreDb) + accountsCache = LedgerRef.init(coreDb.baseTxFrame()) stateRootHash = setupStateDB(accounts, accountsCache) - accountDb = LedgerRef.init(coreDb) + accountDb = LedgerRef.init(coreDb.baseTxFrame()) checkProofsForExistingLeafs(accounts, accountDb, stateRootHash) @@ -141,9 +141,9 @@ proc getProofJsonMain*() = let accounts = getGenesisAlloc("tests" / "customgenesis" / file) coreDb = newCoreDbRef(DefaultDbMemory) - accountsCache = LedgerRef.init(coreDb) + accountsCache = LedgerRef.init(coreDb.baseTxFrame()) stateRootHash = setupStateDB(accounts, accountsCache) - accountDb = LedgerRef.init(coreDb) + accountDb = LedgerRef.init(coreDb.baseTxFrame()) checkProofsForMissingLeafs(accounts, accountDb, stateRootHash) diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 0971c4c346..52df882825 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -145,8 +145,8 @@ proc importBlock(env: TestEnv; blk: Block) = raiseAssert "persistBlocks() failed at block #" & $blk.header.number & " msg: " & error -proc getLedger(com: CommonRef): LedgerRef = - LedgerRef.init(com.db) +proc getLedger(txFrame: CoreDbTxRef): LedgerRef = + LedgerRef.init(txFrame) func getRecipient(tx: Transaction): Address = tx.to.expect("transaction have no recipient") @@ -212,8 +212,6 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false) let eAddr = env.txs[inx].getRecipient block: - let dbTx = env.xdb.ctx.txFrameBegin() - block: let accTx = ledger.beginSavepoint ledger.modBalance(eAddr) @@ -225,11 +223,7 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false) ledger.modBalance(eAddr) ledger.rollback(accTx) - dbTx.rollback() - block: - let dbTx = env.xdb.ctx.txFrameBegin() - block: let accTx = ledger.beginSavepoint ledger.modBalance(eAddr) @@ -239,16 +233,11 @@ proc runTrial3Survive(env: TestEnv, ledger: LedgerRef; inx: int; noisy = false) ledger.persist() - dbTx.commit() - - proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) = ## Like `runTrial3()` but with four blocks and extra db transaction frames. let eAddr = env.txs[inx].getRecipient block: - let dbTx = env.xdb.ctx.txFrameBegin() - block: let accTx = ledger.beginSavepoint ledger.modBalance(eAddr) @@ -272,21 +261,13 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) = ledger.commit(accTx) ledger.persist() - # There must be no dbTx.rollback() here unless `ledger` is - # discarded and/or re-initialised. - dbTx.commit() - block: - let dbTx = env.xdb.ctx.txFrameBegin() - block: let accTx = ledger.beginSavepoint ledger.modBalance(eAddr) ledger.commit(accTx) ledger.persist() - dbTx.commit() - # ------------------------------------------------------------------------------ # Test Runner # ------------------------------------------------------------------------------ @@ -350,38 +331,38 @@ proc runLedgerTransactionTests(noisy = true) = test &"Run {env.txi.len} two-step trials with rollback": for n in env.txi: - let dbTx = env.xdb.ctx.txFrameBegin() + let dbTx = env.xdb.ctx.txFrameBegin(nil) defer: dbTx.dispose() - let ledger = env.com.getLedger() + let ledger = dbTx.getLedger() env.runTrial2ok(ledger, n) test &"Run {env.txi.len} three-step trials with rollback": for n in env.txi: - let dbTx = env.xdb.ctx.txFrameBegin() + let dbTx = env.xdb.ctx.txFrameBegin(nil) defer: dbTx.dispose() - let ledger = env.com.getLedger() + let ledger = dbTx.getLedger() env.runTrial3(ledger, n, rollback = true) test &"Run {env.txi.len} three-step trials with extra db frame rollback" & " throwing Exceptions": for n in env.txi: - let dbTx = env.xdb.ctx.txFrameBegin() + let dbTx = env.xdb.ctx.txFrameBegin(nil) defer: dbTx.dispose() - let ledger = env.com.getLedger() + let ledger = dbTx.getLedger() env.runTrial3Survive(ledger, n, noisy) test &"Run {env.txi.len} tree-step trials without rollback": for n in env.txi: - let dbTx = env.xdb.ctx.txFrameBegin() + let dbTx = env.xdb.ctx.txFrameBegin(nil) defer: dbTx.dispose() - let ledger = env.com.getLedger() + let ledger = dbTx.getLedger() env.runTrial3(ledger, n, rollback = false) test &"Run {env.txi.len} four-step trials with rollback and db frames": for n in env.txi: - let dbTx = env.xdb.ctx.txFrameBegin() + let dbTx = env.xdb.ctx.txFrameBegin(nil) defer: dbTx.dispose() - let ledger = env.com.getLedger() + let ledger = dbTx.getLedger() env.runTrial4(ledger, n, rollback = true) proc runLedgerBasicOperationsTests() = @@ -391,7 +372,7 @@ proc runLedgerBasicOperationsTests() = var memDB = newCoreDbRef DefaultDbMemory - stateDB {.used.} = LedgerRef.init(memDB) + stateDB {.used.} = LedgerRef.init(memDB.baseTxFrame()) address {.used.} = address"0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" code {.used.} = hexToSeqByte("0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6") stateRoot {.used.} : Hash32 @@ -448,7 +429,7 @@ proc runLedgerBasicOperationsTests() = check y.originalStorage.len == 3 test "Ledger various operations": - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) var addr1 = initAddr(1) check ac.isDeadAccount(addr1) == true @@ -480,7 +461,7 @@ proc runLedgerBasicOperationsTests() = ac.persist() stateRoot = ac.getStateRoot() - var db = LedgerRef.init(memDB) + var db = LedgerRef.init(memDB.baseTxFrame()) db.setBalance(addr1, 1100.u256) db.setNonce(addr1, 2) db.setCode(addr1, code) @@ -488,7 +469,7 @@ proc runLedgerBasicOperationsTests() = check stateRoot == db.getStateRoot() # Ledger readonly operations using previous hash - var ac2 = LedgerRef.init(memDB) + var ac2 = LedgerRef.init(memDB.baseTxFrame()) var addr2 = initAddr(2) check ac2.getCodeHash(addr2) == emptyAcc.codeHash @@ -508,14 +489,14 @@ proc runLedgerBasicOperationsTests() = check ac2.getStateRoot() == stateRoot test "Ledger code retrieval after persist called": - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) var addr2 = initAddr(2) ac.setCode(addr2, code) ac.persist() check ac.getCode(addr2) == code let key = contractHashKey(keccak256(code)) - val = memDB.ctx.getKvt().get(key.toOpenArray).valueOr: EmptyBlob + val = memDB.baseTxFrame().get(key.toOpenArray).valueOr: EmptyBlob check val == code test "accessList operations": @@ -541,7 +522,7 @@ proc runLedgerBasicOperationsTests() = proc accessList(ac: LedgerRef, address, slot: int) {.inline.} = ac.accessList(address.initAddr, slot.u256) - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) ac.accessList(0xaa) ac.accessList(0xbb, 0x01) @@ -583,7 +564,7 @@ proc runLedgerBasicOperationsTests() = check ac.verifySlots(0xdd, 0x04) test "transient storage operations": - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) proc tStore(ac: LedgerRef, address, slot, val: int) = ac.setTransientStorage(address.initAddr, slot.u256, val.u256) @@ -650,7 +631,7 @@ proc runLedgerBasicOperationsTests() = test "ledger contractCollision": # use previous hash - var ac = LedgerRef.init(memDB) + var ac = LedgerRef.init(memDB.baseTxFrame()) let addr2 = initAddr(2) check ac.contractCollision(addr2) == false @@ -671,7 +652,7 @@ proc runLedgerBasicOperationsTests() = check ac.contractCollision(addr4) == true test "Ledger storage iterator": - var ac = LedgerRef.init(memDB, storeSlotHash = true) + var ac = LedgerRef.init(memDB.baseTxFrame(), storeSlotHash = true) let addr2 = initAddr(2) ac.setStorage(addr2, 1.u256, 2.u256) ac.setStorage(addr2, 2.u256, 3.u256) diff --git a/tests/test_precompiles.nim b/tests/test_precompiles.nim index 84ef1c01f4..c3647ee19f 100644 --- a/tests/test_precompiles.nim +++ b/tests/test_precompiles.nim @@ -75,7 +75,8 @@ proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus) = vmState = BaseVMState.new( Header(number: 1'u64, stateRoot: emptyRlpHash), Header(), - com + com, + com.db.baseTxFrame() ) case toLowerAscii(label) diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 9fb7ffabd1..3c01698144 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -66,10 +66,10 @@ proc verifySlotProof(trustedStorageRoot: Hash32, slot: StorageProof): MptProofVe key, value) -proc persistFixtureBlock(chainDB: CoreDbRef) = +proc persistFixtureBlock(chainDB: CoreDbTxRef) = let header = getBlockHeader4514995() # Manually inserting header to avoid any parent checks - discard chainDB.ctx.getKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) + discard chainDB.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) chainDB.addBlockNumberToHashLookup(header.number, header.blockHash) chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions) chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995()) @@ -94,7 +94,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = var acc = ctx.am.getAccount(signer).tryGet() blockNumber = 1'u64 - parent = com.db.getCanonicalHead().expect("canonicalHead exists") + parent = com.db.baseTxFrame().getCanonicalHead().expect("canonicalHead exists") parentHash = parent.blockHash let code = evmByteCode: @@ -108,7 +108,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = let vmHeader = Header(parentHash: parentHash, gasLimit: 5_000_000) vmState = BaseVMState() - vmState.init(parent, vmHeader, com) + vmState.init(parent, vmHeader, com, com.db.baseTxFrame()) vmState.stateDB.setCode(ks2, code) vmState.stateDB.addBalance( @@ -155,7 +155,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = txs = [signedTx1, signedTx2] let txRoot = calcTxRoot(txs) - com.db.persistTransactions(blockNumber, txRoot, txs) + com.db.baseTxFrame().persistTransactions(blockNumber, txRoot, txs) vmState.receipts = newSeq[Receipt](txs.len) vmState.cumulativeGasUsed = 0 @@ -172,7 +172,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = date = dateTime(2017, mMar, 30) timeStamp = date.toTime.toUnix.EthTime difficulty = com.calcDifficulty(timeStamp, parent) - + txFrame = com.db.baseTxFrame() # call persist() before we get the stateRoot vmState.stateDB.persist() @@ -189,16 +189,16 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = timestamp : timeStamp ) - com.db.persistHeaderAndSetHead(header, + txFrame.persistHeaderAndSetHead(header, com.startOfHistory).expect("persistHeader not error") - + let uncles = [header] - header.ommersHash = com.db.persistUncles(uncles) + header.ommersHash = txFrame.persistUncles(uncles) - com.db.persistHeaderAndSetHead(header, + txFrame.persistHeaderAndSetHead(header, com.startOfHistory).expect("persistHeader not error") - com.db.persistFixtureBlock() + txFrame.persistFixtureBlock() com.db.persistent(header.number).isOkOr: echo "Failed to save state: ", $error @@ -222,6 +222,7 @@ proc rpcMain*() = conf.networkId, conf.networkParams ) + txFrame = com.db.baseTxFrame() signer = address"0x0e69cde81b1aa07a45c32c6cd85d67229d36bb1b" ks2 = address"0xa3b2222afa5c987da6ef773fde8d01b9f23d481f" ks3 = address"0x597176e9a64aad0845d83afdaf698fbeff77703b" @@ -331,7 +332,7 @@ proc rpcMain*() = check res == w3Qty(0'u64) test "eth_getBlockTransactionCountByHash": - let hash = com.db.getBlockHash(0'u64).expect("block hash exists") + let hash = txFrame.getBlockHash(0'u64).expect("block hash exists") let res = await client.eth_getBlockTransactionCountByHash(hash) check res == w3Qty(0'u64) @@ -340,7 +341,7 @@ proc rpcMain*() = check res == w3Qty(0'u64) test "eth_getUncleCountByBlockHash": - let hash = com.db.getBlockHash(0'u64).expect("block hash exists") + let hash = txFrame.getBlockHash(0'u64).expect("block hash exists") let res = await client.eth_getUncleCountByBlockHash(hash) check res == w3Qty(0'u64) diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim index bab93cb1d6..afffc8eb8d 100644 --- a/tests/test_tracer_json.nim +++ b/tests/test_tracer_json.nim @@ -35,37 +35,37 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = predRoot: Hash32 # from predecessor header txRoot: Hash32 # header with block number `num` rcptRoot: Hash32 # ditto - let - adb = cdb.ctx.mpt # `Aristo` db - kdb = cdb.ctx.kvt # `Kvt` db - ps = PartStateRef.init adb # Partial DB descriptor - - # Fill KVT and collect `proof` data - for (k,v) in jKvp.pairs: - let - key = hexToSeqByte(k) - val = hexToSeqByte(v.getStr()) - if key.len == 32: - doAssert key == val.keccak256.data - if val != @[0x80u8]: # Exclude empty item - proof.add val - else: - if key[0] == 0: - try: - # Pull our particular header fields (if possible) - let header = rlp.decode(val, Header) - if header.number == num: - txRoot = header.txRoot - rcptRoot = header.receiptsRoot - elif header.number == num-1: - predRoot = header.stateRoot - except RlpError: - discard - check kdb.put(key, val).isOk - - # Set up production MPT - ps.partPut(proof, AutomaticPayload).isOkOr: - raiseAssert info & ": partPut => " & $error + # let + # adb = cdb.ctx.mpt # `Aristo` db + # kdb = cdb.ctx.kvt # `Kvt` db + # ps = PartStateRef.init cdb.baseTxFrame # Partial DB descriptor + + # # Fill KVT and collect `proof` data + # for (k,v) in jKvp.pairs: + # let + # key = hexToSeqByte(k) + # val = hexToSeqByte(v.getStr()) + # if key.len == 32: + # doAssert key == val.keccak256.data + # if val != @[0x80u8]: # Exclude empty item + # proof.add val + # else: + # if key[0] == 0: + # try: + # # Pull our particular header fields (if possible) + # let header = rlp.decode(val, Header) + # if header.number == num: + # txRoot = header.txRoot + # rcptRoot = header.receiptsRoot + # elif header.number == num-1: + # predRoot = header.stateRoot + # except RlpError: + # discard + # check kdb.put(key, val).isOk + + # # Set up production MPT + # ps.partPut(proof, AutomaticPayload).isOkOr: + # raiseAssert info & ": partPut => " & $error # TODO code needs updating after removal of generic payloads # # Handle transaction sub-tree @@ -112,8 +112,8 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = # for (rvid,key) in ps.vkPairs: # adb.layersPutKey(rvid, key) - ps.check().isOkOr: - raiseAssert info & ": check => " & $error + # ps.check().isOkOr: + # raiseAssert info & ": check => " & $error #echo ">>> preLoadAristoDb (9)", # "\n ps\n ", ps.pp(byKeyOk=false,byVidOk=false), @@ -141,7 +141,7 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C # Some hack for `Aristo` using the `snap` protocol proof-loader memoryDB.preLoadAristoDb(state, blockNumber) - var blk = com.db.getEthBlock(blockNumber).expect("eth block exists") + var blk = com.db.baseTxFrame().getEthBlock(blockNumber).expect("eth block exists") let txTraces = traceTransactions(com, blk.header, blk.transactions) let stateDump = dumpBlockState(com, blk) diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim index 525a2a9615..0624fd0c94 100644 --- a/tests/test_txpool2.nim +++ b/tests/test_txpool2.nim @@ -180,14 +180,14 @@ proc runTxPoolPosTest() = check rr.isOk() test "validate TxPool prevRandao setter": - var sdb = LedgerRef.init(com.db) + var sdb = LedgerRef.init(com.db.baseTxFrame()) let val = sdb.getStorage(recipient, slot) let randao = Bytes32(val.toBytesBE) check randao == prevRandao test "feeRecipient rewarded": check blk.header.coinbase == feeRecipient - var sdb = LedgerRef.init(com.db) + var sdb = LedgerRef.init(com.db.baseTxFrame()) let bal = sdb.getBalance(feeRecipient) check not bal.isZero @@ -245,14 +245,14 @@ proc runTxPoolBlobhashTest() = check rr.isOk() test "validate TxPool prevRandao setter": - var sdb = LedgerRef.init(com.db) + var sdb = LedgerRef.init(com.db.baseTxFrame()) let val = sdb.getStorage(recipient, slot) let randao = Bytes32(val.toBytesBE) check randao == prevRandao test "feeRecipient rewarded": check blk.header.coinbase == feeRecipient - var sdb = LedgerRef.init(com.db) + var sdb = LedgerRef.init(com.db.baseTxFrame()) let bal = sdb.getBalance(feeRecipient) check not bal.isZero @@ -329,7 +329,7 @@ proc runTxHeadDelta(noisy = true) = check com.syncCurrent == 10.BlockNumber head = chain.headerByNumber(com.syncCurrent).expect("block header exists") let - sdb = LedgerRef.init(com.db) + sdb = LedgerRef.init(com.db.baseTxFrame()) expected = u256(txPerblock * numBlocks) * amount balance = sdb.getBalance(recipient) check balance == expected diff --git a/tools/evmstate/helpers.nim b/tools/evmstate/helpers.nim index c5ea327416..4e0dd6b0e4 100644 --- a/tools/evmstate/helpers.nim +++ b/tools/evmstate/helpers.nim @@ -10,9 +10,7 @@ import std/[json, strutils], - eth/common/keys, - eth/common/headers, - eth/common/transactions, + eth/common/[base, keys, headers, transactions], stint, stew/byteutils, ../../nimbus/transaction, @@ -58,7 +56,7 @@ proc fromJson(T: type PrivateKey, n: JsonNode): PrivateKey = removePrefix(secretKey, "0x") PrivateKey.fromHex(secretKey).tryGet() -proc fromJson(T: type AccessList, n: JsonNode): AccessList = +proc fromJson(T: type transactions.AccessList, n: JsonNode): transactions.AccessList = if n.kind == JNull: return