[Fix] deletion proof should be collected in log tracing #247

Closed
wants to merge 4 commits into from
Changes from 2 commits
50 changes: 36 additions & 14 deletions core/state/statedb.go
@@ -352,37 +352,59 @@ func (s *StateDB) GetRootHash() common.Hash {
 
 // StorageTrieProof is not in Db interface and used explictily for reading proof in storage trie (not the dirty value)
 // For zktrie it also provide required data for predict the deletion, else it just fallback to GetStorageProof
-func (s *StateDB) GetStorageTrieProof(a common.Address, key common.Hash) ([][]byte, []byte, error) {
+func (s *StateDB) GetStorageTrieProof(a common.Address, key common.Hash) ([][]byte, error) {
 
 	// try the trie in stateObject first, else we would create one
 	stateObject := s.getStateObject(a)
 	if stateObject == nil {
-		return nil, nil, errors.New("storage trie for requested address does not exist")
+		return nil, errors.New("storage trie for requested address does not exist")
 	}
 
-	trieS := stateObject.trie
+	trie := stateObject.trie
 	var err error
-	if trieS == nil {
+	if trie == nil {
 		// use a new, temporary trie
-		trieS, err = s.db.OpenStorageTrie(stateObject.addrHash, stateObject.data.Root)
+		trie, err = s.db.OpenStorageTrie(stateObject.addrHash, stateObject.data.Root)
 		if err != nil {
-			return nil, nil, fmt.Errorf("can't create storage trie on root %s: %v ", stateObject.data.Root, err)
+			return nil, fmt.Errorf("can't create storage trie on root %s: %v ", stateObject.data.Root, err)
 		}
 	}
 
 	var proof proofList
-	var sibling []byte
 	if s.IsZktrie() {
-		zkTrie := trieS.(*trie.ZkTrie)
-		if zkTrie == nil {
-			panic("unexpected trie type for zktrie")
-		}
 		key_s, _ := zkt.ToSecureKeyBytes(key.Bytes())
-		sibling, err = zkTrie.ProveWithDeletion(key_s.Bytes(), 0, &proof)
+		err = trie.Prove(key_s.Bytes(), 0, &proof)
 	} else {
-		err = trieS.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+		err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
 	}
-	return proof, sibling, err
+	return proof, err
+}
+
+// GetDeletetionProof return the sibling of a leaf node. For any deletion,
+// information for this sibling node for the leaf node being deleted is required to be passed to
+// witness generator for predicting the trie root after deletion
+func (s *StateDB) GetDeletetionProof(a common.Address, key common.Hash) ([]byte, error) {
+
+	// mpt trie do not need this
+	if !s.IsZktrie() {
+		return nil, nil
+	}
+
+	trieS := s.StorageTrie(a)
+	if trieS == nil {
+		return nil, errors.New("storage trie for requested address does not exist")
+	}
+
+	zkTrie := trieS.(*trie.ZkTrie)
+	if zkTrie == nil {
+		panic("unexpected trie type for zktrie")
+	}
+
+	key_s, _ := zkt.ToSecureKeyBytes(key.Bytes())
+
+	// notice proof is not need, we just need the sibling bytes being returned
+	var proof proofList
+	return zkTrie.ProveWithDeletion(key_s.Bytes(), 0, &proof)
 }
 
 // GetStorageProof returns the Merkle proof for given storage slot.
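For orientation, here is a minimal, self-contained sketch (not part of the PR) of the calling pattern the split implies: the interface below only mirrors the two method names from the diff, while every other type and function is a hypothetical stand-in.

```go
package tracesketch

import (
	"bytes"
	"fmt"
)

// Hash and Address are stand-ins for common.Hash and common.Address.
type Hash [32]byte
type Address [20]byte

// proofReader mirrors the two StateDB methods touched by this PR.
type proofReader interface {
	// GetStorageTrieProof returns the trie path for a storage slot.
	GetStorageTrieProof(a Address, key Hash) ([][]byte, error)
	// GetDeletetionProof returns the sibling node of the leaf that a
	// deletion would remove (nil for the non-zktrie backend).
	GetDeletetionProof(a Address, key Hash) ([]byte, error)
}

// collectProofs shows the intended division of labour: the storage proof is
// always collected, while the deletion proof is requested only when a
// non-zero slot is overwritten with zero, i.e. when the leaf will be deleted.
func collectProofs(db proofReader, addr Address, key, oldVal, newVal Hash) (proof [][]byte, sibling []byte, err error) {
	proof, err = db.GetStorageTrieProof(addr, key)
	if err != nil {
		return nil, nil, err
	}
	var zero Hash
	if !bytes.Equal(oldVal[:], newVal[:]) && bytes.Equal(newVal[:], zero[:]) {
		sibling, err = db.GetDeletetionProof(addr, key)
		if err != nil {
			return proof, nil, fmt.Errorf("deletion proof unavailable: %w", err)
		}
	}
	return proof, sibling, nil
}
```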
1 change: 1 addition & 0 deletions core/vm/interface.go
@@ -53,6 +53,7 @@ type StateDB interface {
 	GetProof(addr common.Address) ([][]byte, error)
 	GetProofByHash(addrHash common.Hash) ([][]byte, error)
 	GetStorageProof(a common.Address, key common.Hash) ([][]byte, error)
+	GetDeletetionProof(a common.Address, key common.Hash) ([]byte, error)
 
 	Suicide(common.Address) bool
 	HasSuicided(common.Address) bool
19 changes: 19 additions & 0 deletions core/vm/logger.go
@@ -169,6 +169,7 @@ type StructLogger struct {
 	statesAffected map[common.Address]struct{}
 	storage map[common.Address]Storage
 	createdAccount *types.AccountWrapper
+	deletionProof [][]byte
 
 	callStackLogInd []int
 	logs []*StructLog
@@ -192,6 +193,7 @@ func NewStructLogger(cfg *LogConfig) *StructLogger {
 func (l *StructLogger) Reset() {
 	l.storage = make(map[common.Address]Storage)
 	l.statesAffected = make(map[common.Address]struct{})
+	l.deletionProof = nil
 	l.output = make([]byte, 0)
 	l.logs = l.logs[:0]
 	l.callStackLogInd = nil
@@ -267,6 +269,18 @@ func (l *StructLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scop
 		if err := traceStorage(l, scope, structlog.getOrInitExtraData()); err != nil {
 			log.Error("Failed to trace data", "opcode", op.String(), "err", err)
 		}
+
+		oldValue := l.env.StateDB.GetState(contractAddress, storageKey)
+		if !bytes.Equal(oldValue.Bytes(), storageValue.Bytes()) &&
+			bytes.Equal(storageValue.Bytes(), common.Hash{}.Bytes()) {
+
+			if delSibling, err := l.env.StateDB.GetDeletetionProof(contractAddress, storageKey); err != nil {
+				log.Error("Fail to obtain deletion proof", "err", err, "address", contractAddress, "key", storageKey)
+			} else {
+				l.deletionProof = append(l.deletionProof, delSibling)
+			}
+		}
+
 	}
 	if l.cfg.EnableReturnData {
 		structlog.ReturnData.Write(rData)
@@ -381,6 +395,11 @@ func (l *StructLogger) UpdatedStorages() map[common.Address]Storage {
 	return l.storage
 }
 
+// DeletionProofs is used to collect all deletion proof collected in handling
+func (l *StructLogger) DeletionProofs() [][]byte {
+	return l.deletionProof
+}
+
 // CreatedAccount return the account data in case it is a create tx
 func (l *StructLogger) CreatedAccount() *types.AccountWrapper { return l.createdAccount }

Review thread on the GetDeletetionProof call in CaptureState:

Reviewer: What's the expected performance impact of this?

Author: Now we need to do a proof on each deletion (an SSTORE that overwrites a non-zero value with 0). The cost depends on the storage trie, but since storage tries are usually not very deep and deletions are not common, I think the impact should be limited.

Reviewer: Okay, sounds acceptable. We just need to think about whether this can be triggered maliciously somehow.

Author: You are right. I just realized that the current implementation adds an extra commitment on the corresponding storage trie, so a series of well-designed malicious txs in one block could bring significant cost. Luckily, provided we separate and constrain the log tracing to run only while obtaining a block trace, such an attack is mitigated: the cost only arises on the nodes used for providing block traces. I also think we can push the deletion-proof generation further into the committing step; I will pursue that idea in another PR.

Author: Will track this problem in #253.
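To illustrate the collector lifecycle added to StructLogger (a field, clearing in Reset, appending on a zero-write, and a getter), here is a stripped-down, self-contained sketch; DeletionSink and its method names are hypothetical and only echo the shape of the real tracer.

```go
package tracesketch

// DeletionSink is a hypothetical, stripped-down stand-in for the parts of
// StructLogger that this PR touches: it accumulates deletion-proof siblings
// during tracing and hands them out afterwards.
type DeletionSink struct {
	deletionProof [][]byte
}

// Reset drops proofs collected for a previous transaction, mirroring
// StructLogger.Reset setting l.deletionProof = nil.
func (s *DeletionSink) Reset() {
	s.deletionProof = nil
}

// OnStorageWrite records the sibling when an SSTORE turns a non-zero slot
// into zero; fetchSibling stands in for StateDB.GetDeletetionProof.
func (s *DeletionSink) OnStorageWrite(oldVal, newVal [32]byte, fetchSibling func() ([]byte, error)) error {
	var zero [32]byte
	if oldVal == newVal || newVal != zero {
		return nil // not a deletion, nothing to collect
	}
	sibling, err := fetchSibling()
	if err != nil {
		return err
	}
	s.deletionProof = append(s.deletionProof, sibling)
	return nil
}

// DeletionProofs mirrors StructLogger.DeletionProofs.
func (s *DeletionSink) DeletionProofs() [][]byte {
	return s.deletionProof
}
```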
21 changes: 17 additions & 4 deletions eth/tracers/api_blocktrace.go
@@ -13,6 +13,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/core/state"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/core/vm"
+	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/params"
 	"github.com/scroll-tech/go-ethereum/rollup/rcfg"
@@ -42,6 +43,8 @@ type traceEnv struct {
 	// this lock is used to protect StorageTrace's read and write mutual exclusion.
 	sMu sync.Mutex
 	*types.StorageTrace
+	// used for de-duplication of deletion proofs when handling mutiple txs
+	deletionProofAgg map[common.Hash][]byte
 	executionResults []*types.ExecutionResult
@@ -119,6 +122,7 @@ func (api *API) createTraceEnv(ctx context.Context, config *TraceConfig, block *
 			Proofs: make(map[string][]hexutil.Bytes),
 			StorageProofs: make(map[string]map[string][]hexutil.Bytes),
 		},
+		deletionProofAgg: make(map[common.Hash][]byte),
 		executionResults: make([]*types.ExecutionResult, block.Transactions().Len()),
@@ -314,7 +318,7 @@ func (api *API) getTxResult(env *traceEnv, state *state.StateDB, index int, bloc
 			}
 			env.sMu.Unlock()
 
-			proof, sibling, err := state.GetStorageTrieProof(addr, key)
+			proof, err := state.GetStorageTrieProof(addr, key)
 			if err != nil {
 				log.Error("Storage proof not available", "error", err, "address", addrStr, "key", keyStr)
 				// but we still mark the proofs map with nil array
@@ -325,13 +329,18 @@
 			}
 			env.sMu.Lock()
 			m[keyStr] = wrappedProof
-			if sibling != nil {
-				env.DeletionProofs = append(env.DeletionProofs, sibling)
-			}
 			env.sMu.Unlock()
 		}
 	}
 
+	deletionProofs := tracer.DeletionProofs()
+	for _, bt := range deletionProofs {
+		key := crypto.Keccak256Hash(bt)
+		env.sMu.Lock()
+		env.deletionProofAgg[key] = bt
+		env.sMu.Unlock()
+	}
+
 	var l1Fee uint64
 	if result.L1Fee != nil {
 		l1Fee = result.L1Fee.Uint64()
@@ -360,6 +369,10 @@ func (api *API) fillBlockTrace(env *traceEnv, block *types.Block) (*types.BlockT
 		txs[i] = types.NewTransactionData(tx, block.NumberU64(), api.backend.ChainConfig())
 	}
 
+	for _, bt := range env.deletionProofAgg {
+		env.DeletionProofs = append(env.DeletionProofs, bt)
+	}
+
 	blockTrace := &types.BlockTrace{
 		ChainID: api.backend.ChainConfig().ChainID.Uint64(),
 		Version: params.ArchiveVersion(params.CommitHash),

Review thread on env.deletionProofAgg[key] = bt:

Reviewer: Do we need the last proof for the same key? Not the first one?

Author: deletionProof is not a full proof but just the bytes of a single node (the sibling of the deleted leaf node), so each item is unique. From the log trace we may collect the same node more than once, so we use a map here for deduplication.

Reviewer: Bytes of a node's value? Or its key? If we delete, reinsert, then delete again, will this work?

Author: It is in fact the bytes of the whole node, i.e. the trie can read these bytes as a valid node and add it to the (temporary) database. Such a node entry in the database is immutable and is not affected by inserts, updates, or deletions; it only loses any reference from the trie when the leaf is deleted. So adding any node to the database never breaks the trie; in the worst case it is simply unused.
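To show why a map keyed by a hash of the node bytes gives the de-duplication discussed above, here is a small self-contained sketch; it uses SHA-256 from the standard library as a stand-in for the Keccak-256 hash (crypto.Keccak256Hash) used in the PR, the aggregator type is hypothetical, and the locking done by the real code is omitted.

```go
package tracesketch

import "crypto/sha256"

// proofAggregator is a hypothetical stand-in for the deletionProofAgg map in
// traceEnv: the same sibling node may be reported by several transactions in
// a block, so entries are keyed by a hash of the node bytes to de-duplicate.
type proofAggregator struct {
	agg map[[32]byte][]byte
}

func newProofAggregator() *proofAggregator {
	return &proofAggregator{agg: make(map[[32]byte][]byte)}
}

// addAll folds the per-transaction proofs (StructLogger.DeletionProofs) into
// the block-level set; a duplicate simply overwrites an identical entry.
func (p *proofAggregator) addAll(proofs [][]byte) {
	for _, bt := range proofs {
		key := sha256.Sum256(bt) // stand-in for crypto.Keccak256Hash(bt)
		p.agg[key] = bt
	}
}

// flatten mirrors the loop in fillBlockTrace that copies the aggregated
// proofs into the final DeletionProofs slice (iteration order is not
// significant here, since each entry is an independent node).
func (p *proofAggregator) flatten() [][]byte {
	out := make([][]byte, 0, len(p.agg))
	for _, bt := range p.agg {
		out = append(out, bt)
	}
	return out
}
```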
2 changes: 1 addition & 1 deletion params/version.go
@@ -24,7 +24,7 @@ import (
 const (
 	VersionMajor = 0 // Major version component of the current release
 	VersionMinor = 1 // Minor version component of the current release
-	VersionPatch = 10 // Patch version component of the current release
+	VersionPatch = 11 // Patch version component of the current release
 	VersionMeta = "alpha" // Version metadata to append to the version string
 )