diff --git a/.github/workflows/release_asset.yml b/.github/workflows/release_asset.yml index f221739c1d..2a89643068 100644 --- a/.github/workflows/release_asset.yml +++ b/.github/workflows/release_asset.yml @@ -48,14 +48,11 @@ jobs: uses: actions-rs/cargo@v1 with: command: build - args: --release + args: --manifest-path=./cmd/starcoin/Cargo.toml --release - name: build starcoin release asset run: bash ./scripts/release.sh ${{ matrix.platform }} - - name: build mpm release asset - run: bash ./scripts/release_mpm.sh ${{ matrix.platform }} - - name: upload artifact asset uses: actions/upload-artifact@v2 if: ${{ github.event_name != 'release'}} diff --git a/Cargo.lock b/Cargo.lock index 5c7c99a783..b093080f1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -377,6 +377,16 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote 1.0.28", + "syn 1.0.107", +] + [[package]] name = "async-channel" version = "1.8.0" @@ -428,6 +438,7 @@ dependencies = [ "blocking", "futures-lite", "once_cell", + "tokio", ] [[package]] @@ -466,6 +477,7 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ + "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -689,6 +701,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", "starcoin-executor-benchmark", "starcoin-genesis", @@ -2135,6 +2148,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", "starcoin-genesis", "starcoin-logger", @@ -2303,7 +2317,7 @@ checksum = "850878694b7933ca4c9569d30a34b55031b9b139ee1fc7b94a527c4ef960d690" [[package]] name = "diem-crypto" version = "0.0.3" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "aes-gcm 0.8.0", "anyhow", @@ -2338,7 +2352,7 @@ dependencies = [ [[package]] name = "diem-crypto-derive" version = "0.0.3" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", @@ -9253,6 +9267,7 @@ name = "starcoin-chain" version = "1.13.9" dependencies = [ "anyhow", + "async-std", "bcs-ext", "clap 3.2.23", "proptest", @@ -9267,9 +9282,12 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", + "starcoin-network-rpc-api", "starcoin-open-block", "starcoin-resource-viewer", "starcoin-service-registry", @@ -9299,7 +9317,10 @@ dependencies = [ "rand_core 0.6.4", "serde 1.0.152", "starcoin-accumulator", + "starcoin-config", "starcoin-crypto", + "starcoin-dag", + 
"starcoin-network-rpc-api", "starcoin-service-registry", "starcoin-state-api", "starcoin-statedb", @@ -9327,6 +9348,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", "starcoin-genesis", "starcoin-logger", @@ -9356,16 +9378,21 @@ name = "starcoin-chain-service" version = "1.13.9" dependencies = [ "anyhow", + "async-std", "async-trait", "futures 0.3.26", "rand 0.8.5", "rand_core 0.6.4", "serde 1.0.152", + "starcoin-accumulator", "starcoin-chain", "starcoin-chain-api", "starcoin-config", + "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-logger", + "starcoin-network-rpc-api", "starcoin-service-registry", "starcoin-state-api", "starcoin-storage", @@ -9409,8 +9436,10 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-dev", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-move-compiler", @@ -9508,7 +9537,7 @@ dependencies = [ [[package]] name = "starcoin-crypto" version = "1.10.0-rc.2" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "anyhow", "bcs", @@ -9527,13 +9556,51 @@ dependencies = [ [[package]] name = "starcoin-crypto-macro" version = "1.10.0-rc.2" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.107", ] +[[package]] +name = "starcoin-dag" +version = "1.13.8" +dependencies = [ + "anyhow", + "bcs-ext", + "bincode", + "byteorder", + "cryptonight-rs", + "futures 0.3.26", + "hex", + "itertools", + "once_cell", + "parking_lot 0.12.1", + "proptest", + "proptest-derive", + "rand 0.8.5", + "rand_core 0.6.4", + "rocksdb", + "rust-argon2", + "schemars", + "serde 1.0.152", + "sha3", + "starcoin-accumulator", + "starcoin-config", + "starcoin-crypto", + "starcoin-logger", + "starcoin-state-api", + "starcoin-storage", + "starcoin-time-service", + "starcoin-types", + "starcoin-vm-types", + "stest", + "tempfile", + "thiserror", + "tokio", +] + [[package]] name = "starcoin-dataformat-generator" version = "1.13.9" @@ -9668,10 +9735,31 @@ dependencies = [ "tokio-executor 0.2.0-alpha.6", ] +[[package]] +name = "starcoin-flexidag" +version = "1.13.8" +dependencies = [ + "anyhow", + "async-trait", + "bcs-ext", + "futures 0.3.26", + "starcoin-accumulator", + "starcoin-config", + "starcoin-consensus", + "starcoin-crypto", + "starcoin-dag", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-storage", + "starcoin-types", + "thiserror", + "tokio", +] + [[package]] name = "starcoin-framework" -version = "11.0.0" -source = "git+https://github.com/starcoinorg/starcoin-framework?rev=345a3900a0064dc57a9560235bc72c12f03448b1#345a3900a0064dc57a9560235bc72c12f03448b1" +version = "13.0.0" +source = "git+https://github.com/starcoinorg/starcoin-framework?rev=975539d8bcad6210b443a5f26685bd2e0d14263f#975539d8bcad6210b443a5f26685bd2e0d14263f" dependencies = [ "anyhow", "include_dir", @@ -9728,6 +9816,7 @@ dependencies = [ "starcoin-chain-mock", 
"starcoin-config", "starcoin-crypto", + "starcoin-dag", "starcoin-genesis", "starcoin-logger", "starcoin-storage", @@ -9749,6 +9838,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", "starcoin-logger", "starcoin-state-api", @@ -9760,6 +9850,7 @@ dependencies = [ "starcoin-vm-types", "stdlib", "stest", + "tempfile", "thiserror", ] @@ -9872,6 +9963,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", "starcoin-genesis", "starcoin-logger", @@ -10181,11 +10273,13 @@ dependencies = [ "starcoin-account-api", "starcoin-account-service", "starcoin-block-relayer", + "starcoin-chain-api", "starcoin-chain-notify", "starcoin-chain-service", "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-dev", "starcoin-executor", "starcoin-genesis", @@ -10293,6 +10387,7 @@ dependencies = [ "network-types", "starcoin-config", "starcoin-crypto", + "starcoin-dag", "starcoin-genesis", "starcoin-logger", "starcoin-network", @@ -10318,6 +10413,7 @@ dependencies = [ "sp-utils", "starcoin-chain", "starcoin-config", + "starcoin-dag", "starcoin-genesis", "starcoin-logger", "starcoin-storage", @@ -10370,6 +10466,8 @@ dependencies = [ "starcoin-chain-api", "starcoin-config", "starcoin-crypto", + "starcoin-dag", + "starcoin-flexidag", "starcoin-logger", "starcoin-resource-viewer", "starcoin-service-registry", @@ -10412,6 +10510,7 @@ dependencies = [ "starcoin-account-api", "starcoin-config", "starcoin-crypto", + "starcoin-dag", "starcoin-logger", "starcoin-rpc-api", "starcoin-rpc-server", @@ -10490,6 +10589,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-dev", "starcoin-executor", "starcoin-genesis", @@ -10718,7 +10818,9 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-metrics", @@ -10746,6 +10848,7 @@ dependencies = [ "sysinfo", "test-helper", "thiserror", + "timeout-join-handler", "tokio", ] @@ -10850,6 +10953,7 @@ dependencies = [ "starcoin-chain-api", "starcoin-config", "starcoin-crypto", + "starcoin-dag", "starcoin-dev", "starcoin-genesis", "starcoin-resource-viewer", @@ -10969,7 +11073,9 @@ dependencies = [ "bytes 1.4.0", "forkable-jellyfish-merkle", "hex", + "lazy_static 1.4.0", "num_enum", + "parking_lot 0.12.1", "proptest", "proptest-derive", "rand 0.8.5", @@ -11511,6 +11617,7 @@ dependencies = [ "starcoin-config", "starcoin-consensus", "starcoin-crypto", + "starcoin-dag", "starcoin-dev", "starcoin-executor", "starcoin-genesis", diff --git a/Cargo.toml b/Cargo.toml index 7d299ec4b8..ffe2b623c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,6 +111,8 @@ members = [ "cmd/miner_client/api", "cmd/db-exporter", "cmd/genesis-nft-miner", + "flexidag", + "flexidag/dag", ] default-members = [ @@ -217,6 +219,8 @@ default-members = [ "stratum", "cmd/miner_client/api", "cmd/db-exporter", + "flexidag", + "flexidag/dag", ] [profile.dev] @@ -246,7 +250,7 @@ api-limiter = { path = "commons/api-limiter" } arc-swap = "1.5.1" arrayref = "0.3" ascii = "1.0.0" -async-std = "1.12" +async-std = { version = "1.12", features = ["attributes", "tokio1"] } async-trait = "0.1.53" asynchronous-codec = "0.5" atomic-counter = "1.0.1" @@ -257,6 +261,9 @@ bcs-ext = { path = "commons/bcs_ext" } bech32 = "0.9" bencher = "0.1.5" bitflags = "1.3.2" +faster-hex = "0.6" +indexmap = 
"1.9.1" +bincode = { version = "1", default-features = false } bs58 = "0.3.1" byteorder = "1.3.4" bytes = "1" @@ -438,11 +445,13 @@ starcoin-chain-service = { path = "chain/service" } starcoin-cmd = { path = "cmd/starcoin" } starcoin-config = { path = "config" } starcoin-consensus = { path = "consensus" } -starcoin-crypto = { git = "https://github.com/starcoinorg/starcoin-crypto", rev = "a742ddc0674022800341182cbb4c3681807b2f00" } +#starcoin-crypto = { git = "https://github.com/starcoinorg/starcoin-crypto", rev = "a742ddc0674022800341182cbb4c3681807b2f00" } +starcoin-crypto = { git = "https://github.com/starcoinorg/starcoin-crypto", rev = "8d41c280a227594ca0a2b6ecba580643518274ea" } + starcoin-decrypt = { path = "commons/decrypt" } starcoin-dev = { path = "vm/dev" } starcoin-executor = { path = "executor" } -starcoin-framework = { git = "https://github.com/starcoinorg/starcoin-framework", rev = "345a3900a0064dc57a9560235bc72c12f03448b1" } +starcoin-framework = { git = "https://github.com/starcoinorg/starcoin-framework", rev = "975539d8bcad6210b443a5f26685bd2e0d14263f" } starcoin-genesis = { path = "genesis" } starcoin-logger = { path = "commons/logger" } starcoin-metrics = { path = "commons/metrics" } @@ -496,7 +505,8 @@ starcoin-parallel-executor = { path = "vm/parallel-executor" } starcoin-transaction-benchmarks = { path = "vm/transaction-benchmarks" } starcoin-language-e2e-tests = { path = "vm/e2e-tests" } starcoin-proptest-helpers = { path = "vm/proptest-helpers" } - +starcoin-flexidag = { path = "flexidag" } +starcoin-dag = {path = "flexidag/dag"} syn = { version = "1.0.107", features = [ "full", "extra-traits", diff --git a/account/src/account_test.rs b/account/src/account_test.rs index bba50ab6cb..5e36ea2528 100644 --- a/account/src/account_test.rs +++ b/account/src/account_test.rs @@ -224,7 +224,7 @@ pub fn test_wallet_account() -> Result<()> { ); //println!("verify result is {:?}", sign.verify(&raw_txn, &public_key)?); println!("public key is {:?}", public_key.to_bytes().as_ref()); - println!("hash value is {:?}", hash_value.as_ref()); + println!("hash value is {:?}", hash_value); println!("key is {:?}", key.derived_address()); println!("address is {:?},result is {:?}", address, result); diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml index 0030d998ea..40359a197d 100644 --- a/benchmarks/Cargo.toml +++ b/benchmarks/Cargo.toml @@ -39,7 +39,7 @@ starcoin-vm-runtime = { workspace = true } starcoin-vm-types = { workspace = true } starcoin-types = { workspace = true } starcoin-executor-benchmark = { workspace = true } - +starcoin-dag = {workspace = true} [dev-dependencies] [lib] diff --git a/benchmarks/src/chain.rs b/benchmarks/src/chain.rs index ede8471734..f16fc23c28 100644 --- a/benchmarks/src/chain.rs +++ b/benchmarks/src/chain.rs @@ -42,12 +42,19 @@ impl ChainBencher { )) .unwrap(), ); + let dag = starcoin_dag::blockdag::BlockDAG::create_for_testing().unwrap(); let (chain_info, _) = - Genesis::init_and_check_storage(&net, storage.clone(), temp_path.path()) + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), temp_path.path()) .expect("init storage by genesis fail."); - let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None) - .expect("create block chain should success."); + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + None, + dag, + ) + .expect("create block chain should success."); let miner_account = AccountInfo::random(); ChainBencher { @@ -66,7 +73,7 @@ impl ChainBencher { 
let (block_template, _) = self .chain .read() - .create_block_template(*self.account.address(), None, vec![], vec![], None) + .create_block_template(*self.account.address(), None, vec![], vec![], None, None) .unwrap(); let block = ConsensusStrategy::Dummy .create_block(block_template, self.net.time_service().as_ref()) diff --git a/block-relayer/src/block_relayer.rs b/block-relayer/src/block_relayer.rs index d8d791051c..6f066818b6 100644 --- a/block-relayer/src/block_relayer.rs +++ b/block-relayer/src/block_relayer.rs @@ -203,7 +203,9 @@ impl BlockRelayer { ctx: &mut ServiceContext, ) -> Result<()> { let network = ctx.get_shared::()?; - let block_connector_service = ctx.service_ref::()?.clone(); + let block_connector_service = ctx + .service_ref::>()? + .clone(); let txpool = self.txpool.clone(); let metrics = self.metrics.clone(); let fut = async move { @@ -277,7 +279,7 @@ impl EventHandler for BlockRelayer { fn handle_event(&mut self, event: NewHeadBlock, ctx: &mut ServiceContext) { debug!( "[block-relay] Handle new head block event, block_id: {:?}", - event.0.block().id() + event.executed_block.block().id() ); let network = match ctx.get_shared::() { Ok(network) => network, @@ -286,7 +288,7 @@ impl EventHandler for BlockRelayer { return; } }; - self.broadcast_compact_block(network, event.0); + self.broadcast_compact_block(network, event.executed_block); } } diff --git a/chain/Cargo.toml b/chain/Cargo.toml index ae9f58e3be..e5b41bfea9 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -23,6 +23,10 @@ starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } starcoin-storage = { workspace = true } thiserror = { workspace = true } +starcoin-network-rpc-api = { workspace = true } +async-std = { workspace = true } +starcoin-flexidag ={ workspace = true } +starcoin-dag ={ workspace = true } [dev-dependencies] proptest = { workspace = true } @@ -39,6 +43,7 @@ stdlib = { workspace = true } stest = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } +starcoin-network-rpc-api = { workspace = true } [features] default = [] diff --git a/chain/api/Cargo.toml b/chain/api/Cargo.toml index 8ecc37b4d1..8a0c546f7d 100644 --- a/chain/api/Cargo.toml +++ b/chain/api/Cargo.toml @@ -15,7 +15,9 @@ starcoin-time-service = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } - +starcoin-network-rpc-api = { workspace = true } +starcoin-config = { workspace = true } +starcoin-dag = { workspace = true } [dev-dependencies] diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs index 93884610e2..6904a28acb 100644 --- a/chain/api/src/chain.rs +++ b/chain/api/src/chain.rs @@ -80,7 +80,7 @@ pub trait ChainReader { /// Verify block header and body, base current chain, but do not verify it execute state. fn verify(&self, block: Block) -> Result; /// Execute block and verify it execute state, and save result base current chain, but do not change current chain. 
- fn execute(&self, block: VerifiedBlock) -> Result; + fn execute(&mut self, block: VerifiedBlock) -> Result; /// Get chain transaction infos fn get_transaction_infos( &self, @@ -100,6 +100,15 @@ pub trait ChainReader { event_index: Option, access_path: Option, ) -> Result>; + + fn current_tips_hash( + &self, + header: &BlockHeader, + ) -> Result)>>; + fn has_dag_block(&self, header_id: HashValue) -> Result; + fn dag_fork_height(&self) -> Result; + fn is_dag(&self, block_header: &BlockHeader) -> Result; + fn is_dag_genesis(&self, block_header: &BlockHeader) -> Result; } pub trait ChainWriter { diff --git a/chain/api/src/errors.rs b/chain/api/src/errors.rs index 777cb19e7c..0fccef901c 100644 --- a/chain/api/src/errors.rs +++ b/chain/api/src/errors.rs @@ -63,6 +63,10 @@ pub enum ConnectBlockError { VerifyBlockFailed(VerifyBlockField, Error), #[error("Barnard hard fork block: {:?} ", .0.header())] BarnardHardFork(Box), + #[error("dag block before time window: {:?} ", .0.header())] + DagBlockBeforeTimeWindow(Box), + #[error("dag block after time window: {:?} ", .0.header())] + DagBlockAfterTimeWindow(Box), } impl ConnectBlockError { @@ -74,6 +78,10 @@ impl ConnectBlockError { ReputationChange::new_fatal("VerifyBlockFailed"); pub const REP_BARNARD_HARD_FORK: ReputationChange = ReputationChange::new_fatal("BarnardHardFork"); + pub const REP_BLOCK_BEFORE_TIME_WINDOW: ReputationChange = + ReputationChange::new_fatal("DagBlockBeforeTimeWindow"); + pub const REP_BLOCK_AFTER_TIME_WINDOW: ReputationChange = + ReputationChange::new_fatal("DagBlockAfterTimeWindow"); pub fn reason(&self) -> &str { match self { @@ -81,6 +89,8 @@ impl ConnectBlockError { ConnectBlockError::ParentNotExist(_) => "ParentNotExist", ConnectBlockError::VerifyBlockFailed(_, _) => "VerifyBlockFailed", ConnectBlockError::BarnardHardFork(_) => "BarnardHardFork", + ConnectBlockError::DagBlockBeforeTimeWindow(_) => "DagBlockBeforeTimeWindow", + ConnectBlockError::DagBlockAfterTimeWindow(_) => "DagBlockAfterTimeWindow", } } @@ -92,6 +102,12 @@ impl ConnectBlockError { ConnectBlockError::REP_VERIFY_BLOCK_FAILED } ConnectBlockError::BarnardHardFork(_) => ConnectBlockError::REP_BARNARD_HARD_FORK, + ConnectBlockError::DagBlockBeforeTimeWindow(_) => { + ConnectBlockError::REP_BLOCK_BEFORE_TIME_WINDOW + } + ConnectBlockError::DagBlockAfterTimeWindow(_) => { + ConnectBlockError::REP_BLOCK_AFTER_TIME_WINDOW + } } } } diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index d4144fe9a0..0fcf1d5505 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -4,6 +4,7 @@ use crate::TransactionInfoWithProof; use anyhow::Result; use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_service_registry::ServiceRequest; use starcoin_types::transaction::RichTransactionInfo; use starcoin_types::{ @@ -60,6 +61,11 @@ pub enum ChainRequest { access_path: Option, }, GetBlockInfos(Vec), + GetDagBlockChildren { + block_ids: Vec, + }, + GetDagForkNumber, + GetDagStateView, } impl ServiceRequest for ChainRequest { @@ -88,4 +94,6 @@ pub enum ChainResponse { HashVec(Vec), TransactionProof(Box>), BlockInfoVec(Box>>), + DagForkNumber(BlockNumber), + DagStateView(Box), } diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 8ba6adce0e..182e573aa7 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -5,6 +5,7 @@ use crate::message::{ChainRequest, ChainResponse}; use crate::TransactionInfoWithProof; use anyhow::{bail, Result}; use 
starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_service_registry::{ActorService, ServiceHandler, ServiceRef}; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; use starcoin_types::filter::Filter; @@ -72,6 +73,8 @@ pub trait ReadableChainService { ) -> Result>; fn get_block_infos(&self, ids: Vec) -> Result>>; + fn get_dag_block_children(&self, ids: Vec) -> Result>; + fn get_dag_state(&self) -> Result; } /// Writeable block chain service trait @@ -139,6 +142,9 @@ pub trait ChainAsyncService: ) -> Result>; async fn get_block_infos(&self, hashes: Vec) -> Result>>; + async fn get_dag_block_children(&self, hashes: Vec) -> Result>; + async fn dag_fork_number(&self) -> Result; + async fn get_dag_state(&self) -> Result; } #[async_trait::async_trait] @@ -436,4 +442,34 @@ where bail!("get block_infos error") } } + + async fn get_dag_block_children(&self, hashes: Vec) -> Result> { + let response = self + .send(ChainRequest::GetDagBlockChildren { block_ids: hashes }) + .await??; + if let ChainResponse::HashVec(children) = response { + Ok(children) + } else { + bail!("get dag block children error") + } + } + + async fn dag_fork_number(&self) -> Result { + if let ChainResponse::DagForkNumber(fork_number) = + self.send(ChainRequest::GetDagForkNumber).await?? + { + Ok(fork_number) + } else { + bail!("Get dag fork number response error.") + } + } + + async fn get_dag_state(&self) -> Result { + let response = self.send(ChainRequest::GetDagStateView).await??; + if let ChainResponse::DagStateView(dag_state) = response { + Ok(*dag_state) + } else { + bail!("get dag state error") + } + } } diff --git a/chain/chain-notify/src/lib.rs b/chain/chain-notify/src/lib.rs index 60c1985dbe..2cf26a6db4 100644 --- a/chain/chain-notify/src/lib.rs +++ b/chain/chain-notify/src/lib.rs @@ -52,11 +52,9 @@ impl EventHandler for ChainNotifyHandlerService { fn handle_event( &mut self, item: NewHeadBlock, ctx: &mut ServiceContext, ) { - let NewHeadBlock(block_detail) = item; - let block = block_detail.block(); + let block = item.executed_block.block(); // notify header. 
self.notify_new_block(block, ctx); - // notify events if let Err(e) = self.notify_events(block, self.store.clone(), ctx) { error!(target: "pubsub", "fail to notify events to client, err: {}", &e); diff --git a/chain/mock/Cargo.toml b/chain/mock/Cargo.toml index 53495a21da..d0c895861d 100644 --- a/chain/mock/Cargo.toml +++ b/chain/mock/Cargo.toml @@ -23,7 +23,7 @@ starcoin-storage = { workspace = true } starcoin-types = { package = "starcoin-types", workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } - +starcoin-dag = { workspace = true } [dev-dependencies] proptest = { workspace = true } proptest-derive = { workspace = true } diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index 403cd09611..e7a0dfed28 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -7,9 +7,11 @@ use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; use starcoin_config::ChainNetwork; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_storage::Storage; +use starcoin_types::block::{BlockNumber, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH}; use starcoin_types::block::{Block, BlockHeader}; use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; @@ -18,16 +20,28 @@ pub struct MockChain { net: ChainNetwork, head: BlockChain, miner: AccountInfo, + storage: Arc, } impl MockChain { pub fn new(net: ChainNetwork) -> Result { - let (storage, chain_info, _) = - Genesis::init_storage_for_test(&net).expect("init storage by genesis fail."); + Self::new_with_fork(net, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH) + } + + + pub fn new_with_fork(net: ChainNetwork, fork_number: BlockNumber) -> Result { + let (storage, chain_info, _, dag) = Genesis::init_storage_for_test(&net, fork_number) + .expect("init storage by genesis fail."); - let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)?; + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage.clone(), + None, + dag, + )?; let miner = AccountInfo::random(); - Ok(Self::new_inner(net, chain, miner)) + Ok(Self::new_inner(net, chain, miner, storage)) } pub fn new_with_storage( @@ -35,18 +49,39 @@ impl MockChain { storage: Arc, head_block_hash: HashValue, miner: AccountInfo, + dag: BlockDAG, ) -> Result { - let chain = BlockChain::new(net.time_service(), head_block_hash, storage, None)?; - Ok(Self::new_inner(net, chain, miner)) + let chain = BlockChain::new( + net.time_service(), + head_block_hash, + storage.clone(), + None, + dag, + )?; + Ok(Self::new_inner(net, chain, miner, storage)) } - pub fn new_with_chain(net: ChainNetwork, chain: BlockChain) -> Result { + pub fn new_with_chain( + net: ChainNetwork, + chain: BlockChain, + storage: Arc, + ) -> Result { let miner = AccountInfo::random(); - Ok(Self::new_inner(net, chain, miner)) + Ok(Self::new_inner(net, chain, miner, storage)) } - fn new_inner(net: ChainNetwork, head: BlockChain, miner: AccountInfo) -> Self { - Self { net, head, miner } + fn new_inner( + net: ChainNetwork, + head: BlockChain, + miner: AccountInfo, + storage: Arc, + ) -> Self { + Self { + net, + head, + miner, + storage, + } } pub fn net(&self) -> &ChainNetwork { @@ -72,6 +107,7 @@ impl MockChain { block_id, self.head.get_storage(), None, + self.head.dag(), ) } @@ -81,9 +117,24 @@ impl MockChain { head: chain, net: self.net.clone(), miner: AccountInfo::random(), + storage: 
self.storage.clone(), + }) + } + + pub fn fork_dag(&self, head_id: Option) -> Result { + let chain = self.fork_new_branch(head_id)?; + Ok(Self { + head: chain, + net: self.net.clone(), + miner: AccountInfo::random(), + storage: self.storage.clone(), }) } + pub fn get_storage(&self) -> Arc { + self.storage.clone() + } + pub fn select_head(&mut self, new_block: Block) -> Result<()> { //TODO reuse WriteChainService's select_head logic. // new block should be execute and save to storage. @@ -93,6 +144,7 @@ impl MockChain { new_block_id, self.head.get_storage(), None, + self.head.dag(), )?; let branch_total_difficulty = branch.get_total_difficulty()?; let head_total_difficulty = self.head.get_total_difficulty()?; @@ -112,9 +164,28 @@ impl MockChain { } pub fn produce(&self) -> Result { - let (template, _) = - self.head - .create_block_template(*self.miner.address(), None, vec![], vec![], None)?; + let (template, _) = self.head.create_block_template( + *self.miner.address(), + None, + vec![], + vec![], + None, + None, + )?; + self.head + .consensus() + .create_block(template, self.net.time_service().as_ref()) + } + + pub fn produce_block_by_header(&mut self, parent_header: BlockHeader) -> Result { + let (template, _) = self.head.create_block_template_by_header( + *self.miner.address(), + parent_header, + vec![], + vec![], + None, + None, + )?; self.head .consensus() .create_block(template, self.net.time_service().as_ref()) diff --git a/chain/open-block/src/lib.rs b/chain/open-block/src/lib.rs index 7df7510ecd..cef4e4fb25 100644 --- a/chain/open-block/src/lib.rs +++ b/chain/open-block/src/lib.rs @@ -10,6 +10,7 @@ use starcoin_logger::prelude::*; use starcoin_state_api::{ChainStateReader, ChainStateWriter}; use starcoin_statedb::ChainStateDB; use starcoin_storage::Store; +use starcoin_types::block::Block; use starcoin_types::genesis_config::{ChainId, ConsensusStrategy}; use starcoin_types::vm_error::KeptVMStatus; use starcoin_types::{ @@ -39,6 +40,7 @@ pub struct OpenedBlock { difficulty: U256, strategy: ConsensusStrategy, vm_metrics: Option, + blue_blocks: Option>, } impl OpenedBlock { @@ -52,6 +54,8 @@ impl OpenedBlock { difficulty: U256, strategy: ConsensusStrategy, vm_metrics: Option, + tips_hash: Option>, + blue_blocks: Option>, ) -> Result { let previous_block_id = previous_header.id(); let block_info = storage @@ -80,7 +84,6 @@ impl OpenedBlock { previous_block_info: block_info, block_meta, gas_limit: block_gas_limit, - state: chain_state, txn_accumulator, gas_used: 0, @@ -90,6 +93,7 @@ impl OpenedBlock { difficulty, strategy, vm_metrics, + blue_blocks, }; opened_block.initialize()?; Ok(opened_block) @@ -136,11 +140,19 @@ impl OpenedBlock { /// as the internal state may be corrupted. /// TODO: make the function can be called again even last call returns error. pub fn push_txns(&mut self, user_txns: Vec) -> Result { - let mut txns: Vec<_> = user_txns - .iter() - .cloned() - .map(Transaction::UserTransaction) - .collect(); + let mut txns = vec![]; + for block in self.blue_blocks.as_ref().unwrap_or(&vec![]) { + txns.extend( + block + .transactions() + .iter() + .skip(1) + .cloned() + .map(Transaction::UserTransaction), + ); + } + + txns.extend(user_txns.iter().cloned().map(Transaction::UserTransaction)); let txn_outputs = { let gas_left = self.gas_limit.checked_sub(self.gas_used).ok_or_else(|| { @@ -157,7 +169,6 @@ impl OpenedBlock { self.vm_metrics.clone(), )? 
}; - let untouched_user_txns: Vec = if txn_outputs.len() >= txns.len() { vec![] } else { @@ -168,6 +179,7 @@ impl OpenedBlock { let mut discard_txns: Vec = Vec::new(); debug_assert_eq!(txns.len(), txn_outputs.len()); + for (txn, output) in txns.into_iter().zip(txn_outputs.into_iter()) { let txn_hash = txn.id(); match output.status() { @@ -264,8 +276,9 @@ impl OpenedBlock { /// Construct a block template for mining. pub fn finalize(self) -> Result { - let accumulator_root = self.txn_accumulator.root_hash(); let state_root = self.state.state_root(); + let accumulator_root = self.txn_accumulator.root_hash(); + let uncles = if !self.uncles.is_empty() { Some(self.uncles) } else { diff --git a/chain/service/Cargo.toml b/chain/service/Cargo.toml index 34d81bd91b..120c0b1acc 100644 --- a/chain/service/Cargo.toml +++ b/chain/service/Cargo.toml @@ -1,5 +1,6 @@ [dependencies] anyhow = { workspace = true } +async-std = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } rand = { workspace = true } @@ -18,6 +19,10 @@ starcoin-vm-runtime = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +starcoin-network-rpc-api = { workspace = true } +starcoin-consensus = { workspace = true } +starcoin-dag = { workspace = true } +starcoin-accumulator = { package = "starcoin-accumulator", workspace = true } [dev-dependencies] stest = { workspace = true } diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index f7b32799d1..65138e094a 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -1,7 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use anyhow::{format_err, Error, Result}; +use anyhow::{bail, format_err, Error, Result}; use starcoin_chain::BlockChain; use starcoin_chain_api::message::{ChainRequest, ChainResponse}; use starcoin_chain_api::{ @@ -9,6 +9,8 @@ use starcoin_chain_api::{ }; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_logger::prelude::*; use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, @@ -39,10 +41,11 @@ impl ChainReaderService { config: Arc, startup_info: StartupInfo, storage: Arc, + dag: BlockDAG, vm_metrics: Option, ) -> Result { Ok(Self { - inner: ChainReaderServiceInner::new(config, startup_info, storage, vm_metrics)?, + inner: ChainReaderServiceInner::new(config, startup_info, storage, dag, vm_metrics)?, }) } } @@ -54,8 +57,9 @@ impl ServiceFactory for ChainReaderService { let startup_info = storage .get_startup_info()? 
.ok_or_else(|| format_err!("StartupInfo should exist at service init."))?; + let dag = ctx.get_shared::()?; let vm_metrics = ctx.get_shared_opt::()?; - Self::new(config, startup_info, storage, vm_metrics) + Self::new(config, startup_info, storage, dag, vm_metrics) } } @@ -73,9 +77,14 @@ impl ActorService for ChainReaderService { impl EventHandler for ChainReaderService { fn handle_event(&mut self, event: NewHeadBlock, _ctx: &mut ServiceContext) { - let new_head = event.0.block().header(); - if let Err(e) = if self.inner.get_main().can_connect(event.0.as_ref()) { - self.inner.update_chain_head(event.0.as_ref().clone()) + let new_head = event.executed_block.block().header().clone(); + if let Err(e) = if self + .inner + .get_main() + .can_connect(event.executed_block.as_ref()) + { + self.inner + .update_chain_head(event.executed_block.as_ref().clone()) } else { self.inner.switch_main(new_head.id()) } { @@ -232,6 +241,15 @@ impl ServiceHandler for ChainReaderService { ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new( self.inner.get_block_infos(ids)?, ))), + ChainRequest::GetDagBlockChildren { block_ids } => Ok(ChainResponse::HashVec( + self.inner.get_dag_block_children(block_ids)?, + )), + ChainRequest::GetDagForkNumber => Ok(ChainResponse::DagForkNumber( + self.inner.main.dag_fork_height()?, + )), + ChainRequest::GetDagStateView => Ok(ChainResponse::DagStateView(Box::new( + self.inner.get_dag_state()?, + ))), } } } @@ -241,6 +259,7 @@ pub struct ChainReaderServiceInner { startup_info: StartupInfo, main: BlockChain, storage: Arc, + dag: BlockDAG, vm_metrics: Option, } @@ -249,6 +268,7 @@ impl ChainReaderServiceInner { config: Arc, startup_info: StartupInfo, storage: Arc, + dag: BlockDAG, vm_metrics: Option, ) -> Result { let net = config.net(); @@ -257,12 +277,14 @@ impl ChainReaderServiceInner { startup_info.main, storage.clone(), vm_metrics.clone(), + dag.clone(), )?; Ok(Self { config, startup_info, main, storage, + dag, vm_metrics, }) } @@ -283,6 +305,7 @@ impl ChainReaderServiceInner { new_head_id, self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?; Ok(()) } @@ -366,6 +389,7 @@ impl ReadableChainService for ChainReaderServiceInner { fn main_startup_info(&self) -> StartupInfo { self.startup_info.clone() } + fn main_blocks_by_number( &self, number: Option, @@ -416,6 +440,34 @@ impl ReadableChainService for ChainReaderServiceInner { fn get_block_infos(&self, ids: Vec) -> Result>> { self.storage.get_block_infos(ids) } + + fn get_dag_block_children(&self, ids: Vec) -> Result> { + ids.into_iter().fold(Ok(vec![]), |mut result, id| { + match self.dag.get_children(id) { + anyhow::Result::Ok(children) => { + let _ = result.as_mut().map(|r| r.extend(children)); + Ok(result?) + } + Err(e) => Err(e), + } + }) + } + + fn get_dag_state(&self) -> Result { + let head = self.main.current_header(); + if !self.main.is_dag(&head)? || !self.main.is_dag_genesis(&head)? 
{ + bail!( + "The chain is not yet a dag: its dag fork number is {} and the current block number is {}.", + self.main.dag_fork_height()?, + head.number() + ); + } + let (dag_genesis, state) = self.main.get_dag_state_by_block(&head)?; + Ok(DagStateView { + dag_genesis, + tips: state.tips, + }) + } } #[cfg(test)] @@ -424,12 +476,15 @@ mod tests { use starcoin_chain_api::ChainAsyncService; use starcoin_config::NodeConfig; use starcoin_service_registry::{RegistryAsyncService, RegistryService}; + use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; #[stest::test] async fn test_actor_launch() -> Result<()> { let config = Arc::new(NodeConfig::random_for_test()); - let (storage, chain_info, _) = test_helper::Genesis::init_storage_for_test(config.net())?; + let (storage, chain_info, _, dag) = + test_helper::Genesis::init_storage_for_test(config.net(), TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH)?; let registry = RegistryService::launch(); + registry.put_shared(dag).await?; registry.put_shared(config).await?; registry.put_shared(storage).await?; let service_ref = registry.register::().await?; diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 1c7825d4c7..d2e2ef6286 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1,8 +1,9 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::verifier::{BlockVerifier, FullVerifier}; -use anyhow::{bail, ensure, format_err, Result}; +use crate::verifier::{BlockVerifier, DagBasicVerifier, DagVerifier, FullVerifier}; +use anyhow::{anyhow, bail, ensure, format_err, Ok, Result}; +use bcs_ext::BCSCodec; use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME}; use starcoin_accumulator::inmemory::InMemoryAccumulator; use starcoin_accumulator::{ @@ -15,6 +16,10 @@ use starcoin_chain_api::{ use starcoin_consensus::Consensus; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::HashValue; +use starcoin_dag::block_dag_config::BlockDAGType; +use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::consenses_state::DagState; +use starcoin_dag::consensusdb::prelude::StoreError; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; @@ -38,8 +43,11 @@ use starcoin_types::{ use starcoin_vm_types::access_path::AccessPath; use starcoin_vm_types::account_config::genesis_address; use starcoin_vm_types::genesis_config::ConsensusStrategy; +use starcoin_vm_types::on_chain_config::FlexiDagConfig; use starcoin_vm_types::on_chain_resource::Epoch; +use starcoin_vm_types::state_view::StateReaderExt; use std::cmp::min; +use std::collections::HashSet; use std::iter::Extend; use std::option::Option::{None, Some}; use std::{collections::HashMap, sync::Arc}; @@ -60,6 +68,7 @@ pub struct BlockChain { uncles: HashMap, epoch: Epoch, vm_metrics: Option, + dag: BlockDAG, } impl BlockChain { @@ -68,11 +77,12 @@ impl BlockChain { head_block_hash: HashValue, storage: Arc, vm_metrics: Option, + dag: BlockDAG, ) -> Result { let head = storage .get_block_by_hash(head_block_hash)? .ok_or_else(|| format_err!("Can not find block by hash {:?}", head_block_hash))?; - Self::new_with_uncles(time_service, head, None, storage, vm_metrics) + Self::new_with_uncles(time_service, head, None, storage, vm_metrics, dag) } fn new_with_uncles( @@ -81,6 +91,7 @@ impl BlockChain { uncles: Option>, storage: Arc, vm_metrics: Option, + mut dag: BlockDAG, ) -> Result { let block_info = storage .get_block_info(head_block.id())? 
@@ -113,11 +124,16 @@ impl BlockChain { head: head_block, }, statedb: chain_state, - storage, + storage: storage.clone(), uncles: HashMap::new(), epoch, vm_metrics, + dag: dag.clone(), }; + let current_header = chain.current_header(); + if chain.is_dag(&current_header)? || chain.is_dag_genesis(&current_header)? { + dag.set_reindex_root(chain.get_block_dag_origin()?)?; + } watch(CHAIN_WATCH_NAME, "n1251"); match uncles { Some(data) => chain.uncles = data, @@ -132,6 +148,7 @@ impl BlockChain { storage: Arc, genesis_epoch: Epoch, genesis_block: Block, + dag: BlockDAG, ) -> Result { debug_assert!(genesis_block.header().is_genesis()); let txn_accumulator = MerkleAccumulator::new_empty( @@ -151,7 +168,7 @@ impl BlockChain { genesis_block, None, )?; - Self::new(time_service, executed_block.block.id(), storage, None) + Self::new(time_service, executed_block.block.id(), storage, None, dag) } pub fn current_epoch_uncles_size(&self) -> u64 { @@ -169,6 +186,10 @@ impl BlockChain { self.time_service.clone() } + pub fn dag(&self) -> BlockDAG { + self.dag.clone() + } + //TODO lazy init uncles cache. fn update_uncle_cache(&mut self) -> Result<()> { self.uncles = self.epoch_uncles()?; @@ -219,6 +240,7 @@ impl BlockChain { user_txns: Vec, uncles: Vec, block_gas_limit: Option, + tips: Option>, ) -> Result<(BlockTemplate, ExcludedTxns)> { //FIXME create block template by parent may be use invalid chain state, such as epoch. //So the right way should be creating a BlockChain by parent_hash, then create block template. @@ -230,31 +252,71 @@ impl BlockChain { None => self.current_header(), }; - self.create_block_template_inner( + self.create_block_template_by_header( author, previous_header, user_txns, uncles, block_gas_limit, + tips, ) } - fn create_block_template_inner( + pub fn create_block_template_by_header( &self, author: AccountAddress, previous_header: BlockHeader, user_txns: Vec, uncles: Vec, block_gas_limit: Option, + tips: Option>, ) -> Result<(BlockTemplate, ExcludedTxns)> { + let current_number = previous_header.number().saturating_add(1); let epoch = self.epoch(); let on_chain_block_gas_limit = epoch.block_gas_limit(); let final_block_gas_limit = block_gas_limit .map(|block_gas_limit| min(block_gas_limit, on_chain_block_gas_limit)) .unwrap_or(on_chain_block_gas_limit); - + let (_, tips_hash) = if current_number <= self.dag_fork_height()? { + (None, None) + } else if tips.is_some() { + (Some(self.get_block_dag_genesis(&previous_header)?), tips) + } else { + let result = self.current_tips_hash(&previous_header)?.expect("the block number is larger than the dag fork number but the state data does not exist"); + (Some(result.0), Some(result.1)) + }; let strategy = epoch.strategy(); let difficulty = strategy.calculate_next_difficulty(self)?; + let (uncles, blue_blocks) = { + match &tips_hash { + None => (uncles, None), + Some(tips) => { + let mut blues = self.dag.ghostdata(tips)?.mergeset_blues.to_vec(); + info!( + "create block template with tips:{:?}, ghostdata blues:{:?}", + &tips_hash, blues + ); + let mut blue_blocks = vec![]; + let _selected_parent = blues.remove(0); + for blue in &blues { + let block = self + .storage + .get_block_by_hash(blue.to_owned())? 
+ .expect("Block should exist"); + blue_blocks.push(block); + } + ( + blue_blocks + .as_slice() + .iter() + .map(|b| b.header.clone()) + .collect(), + Some(blue_blocks), + ) + } + } + }; + info!("Blue blocks:{:?}", blue_blocks); let mut opened_block = OpenedBlock::new( self.storage.clone(), previous_header, @@ -265,6 +327,8 @@ impl BlockChain { difficulty, strategy, None, + Some(tips_hash.unwrap_or_default()), + blue_blocks, )?; let excluded_txns = opened_block.push_txns(user_txns)?; let template = opened_block.finalize()?; @@ -324,7 +388,12 @@ impl BlockChain { where V: BlockVerifier, { - V::verify_block(self, block) + if self.is_dag(block.header())? { + let selected_chain = Self::new(self.time_service.clone(), block.parent_hash(), self.storage.clone(), self.vm_metrics.clone(), self.dag.clone())?; + V::verify_block(&selected_chain, block) + } else { + V::verify_block(self, block) + } } pub fn apply_with_verifier(&mut self, block: Block) -> Result @@ -347,6 +416,223 @@ impl BlockChain { self.connect(ExecutedBlock { block, block_info }) } + fn check_parents_coherent(&self, header: &BlockHeader) -> Result { + if !self.is_dag(header)? { + bail!("Block is not a dag block."); + } + + let results = header.parents_hash().ok_or_else(|| anyhow!("dag block has no parents."))?.into_iter().map(|parent_hash| { + let header = self.storage.get_block_header_by_hash(parent_hash)?.ok_or_else(|| anyhow!("failed to find the block header in the block storage when checking the dag block exists, block hash: {:?}, number: {:?}", header.id(), header.number()))?; + let dag_genesis_hash = self.get_block_dag_genesis(&header)?; + let dag_genesis = self.storage.get_block_header_by_hash(dag_genesis_hash)?.ok_or_else(|| anyhow!("failed to find the block header in the block storage when checking the dag block exists, block hash: {:?}, number: {:?}", header.id(), header.number()))?; + Ok(dag_genesis.parent_hash()) + }).collect::>>()?; + + if results.len() == 1 { + Ok(results + .into_iter() + .next() + .expect("the len of the results is larger than 1 but no the first elemen!")) + } else { + bail!("dag block: {:?}, number: {:?} has multiple parents whose dags are not the same one! Their dag genesis are: {:?}", header.id(), header.number(), results); + } + } + + fn execute_dag_block(&mut self, verified_block: VerifiedBlock) -> Result { + let origin = self.check_parents_coherent(verified_block.0.header())?; + info!("execute dag block:{:?}", verified_block.0); + let block = verified_block.0; + let selected_parent = block.parent_hash(); + let blues = block.uncle_ids(); + let block_info_past = self + .storage + .get_block_info(selected_parent)? + .expect("selected parent must executed"); + let header = block.header(); + let block_id = header.id(); + //TODO::FIXEME + let selected_head = self + .storage + .get_block_by_hash(selected_parent)? + .ok_or_else(|| { + format_err!("Can not find selected block by hash {:?}", selected_parent) + })?; + let block_metadata = block.to_metadata(selected_head.header().gas_used()); + let mut transactions = vec![Transaction::BlockMetadata(block_metadata)]; + let mut total_difficulty = header.difficulty() + block_info_past.total_difficulty; + + for blue in blues { + let blue_block = self + .storage + .get_block_by_hash(blue)? 
+ .expect("block blue need exist"); + transactions.extend( + blue_block + .transactions() + .iter() + .skip(1) + .cloned() + .map(Transaction::UserTransaction), + ); + total_difficulty += blue_block.header.difficulty(); + } + transactions.extend( + block + .transactions() + .iter() + .cloned() + .map(Transaction::UserTransaction), + ); + watch(CHAIN_WATCH_NAME, "n21"); + let statedb = self.statedb.fork_at(selected_head.header.state_root()); + let epoch = get_epoch_from_statedb(&statedb)?; + let executed_data = starcoin_executor::block_execute( + &statedb, + transactions.clone(), + epoch.block_gas_limit(), //TODO: Fix me + self.vm_metrics.clone(), + )?; + watch(CHAIN_WATCH_NAME, "n22"); + let state_root = executed_data.state_root; + let vec_transaction_info = &executed_data.txn_infos; + verify_block!( + VerifyBlockField::State, + state_root == header.state_root(), + "verify block:{:?} state_root fail", + block_id, + ); + let block_gas_used = vec_transaction_info + .iter() + .fold(0u64, |acc, i| acc.saturating_add(i.gas_used())); + verify_block!( + VerifyBlockField::State, + block_gas_used == header.gas_used(), + "invalid block: gas_used is not match" + ); + + verify_block!( + VerifyBlockField::State, + vec_transaction_info.len() == transactions.len(), + "invalid txn num in the block" + ); + let txn_accumulator = info_2_accumulator( + block_info_past.txn_accumulator_info, + AccumulatorStoreType::Transaction, + self.storage.as_ref(), + ); + let block_accumulator = info_2_accumulator( + block_info_past.block_accumulator_info, + AccumulatorStoreType::Block, + self.storage.as_ref(), + ); + let transaction_global_index = txn_accumulator.num_leaves(); + + // txn accumulator verify. + let executed_accumulator_root = { + let included_txn_info_hashes: Vec<_> = + vec_transaction_info.iter().map(|info| info.id()).collect(); + txn_accumulator.append(&included_txn_info_hashes)? + }; + + verify_block!( + VerifyBlockField::State, + executed_accumulator_root == header.txn_accumulator_root(), + "verify block: txn accumulator root mismatch" + ); + + watch(CHAIN_WATCH_NAME, "n23"); + statedb + .flush() + .map_err(BlockExecutorError::BlockChainStateErr)?; + // If chain state is matched, and accumulator is matched, + // then, we save flush states, and save block data. 
+ watch(CHAIN_WATCH_NAME, "n24"); + txn_accumulator + .flush() + .map_err(|_err| BlockExecutorError::BlockAccumulatorFlushErr)?; + + block_accumulator.append(&[block_id])?; + block_accumulator.flush()?; + + let txn_accumulator_info: AccumulatorInfo = txn_accumulator.get_info(); + let block_accumulator_info: AccumulatorInfo = block_accumulator.get_info(); + let block_info = BlockInfo::new( + block_id, + total_difficulty, + txn_accumulator_info, + block_accumulator_info, + ); + + watch(CHAIN_WATCH_NAME, "n25"); + + // save block's transaction relationship and save transaction + + let block_id = block.id(); + let txn_infos = executed_data.txn_infos; + let txn_events = executed_data.txn_events; + let txn_table_infos = executed_data + .txn_table_infos + .into_iter() + .collect::>(); + + debug_assert!( + txn_events.len() == txn_infos.len(), + "events' length should be equal to txn infos' length" + ); + let txn_info_ids: Vec<_> = txn_infos.iter().map(|info| info.id()).collect(); + for (info_id, events) in txn_info_ids.iter().zip(txn_events.into_iter()) { + self.storage.save_contract_events(*info_id, events)?; + } + + self.storage.save_transaction_infos( + txn_infos + .into_iter() + .enumerate() + .map(|(transaction_index, info)| { + RichTransactionInfo::new( + block_id, + block.header().number(), + info, + transaction_index as u32, + transaction_global_index + .checked_add(transaction_index as u64) + .expect("transaction_global_index overflow."), + ) + }) + .collect(), + )?; + + let txn_id_vec = transactions + .iter() + .map(|user_txn| user_txn.id()) + .collect::>(); + // save transactions + self.storage.save_transaction_batch(transactions)?; + + // save block's transactions + self.storage + .save_block_transaction_ids(block_id, txn_id_vec)?; + self.storage + .save_block_txn_info_ids(block_id, txn_info_ids)?; + self.storage.commit_block(block.clone())?; + self.storage.save_block_info(block_info.clone())?; + + self.storage.save_table_infos(txn_table_infos)?; + let result = self.dag.commit(header.to_owned(), origin); + match result { + anyhow::Result::Ok(_) => (), + Err(e) => { + if let Some(StoreError::KeyAlreadyExists(_)) = e.downcast_ref::() { + info!("dag block already exist, ignore"); + } else { + return Err(e); + } + } + } + watch(CHAIN_WATCH_NAME, "n26"); + Ok(ExecutedBlock { block, block_info }) + } + //TODO consider move this logic to BlockExecutor fn execute_block_and_save( storage: &dyn Store, @@ -394,7 +680,7 @@ impl BlockChain { verify_block!( VerifyBlockField::State, state_root == header.state_root(), - "verify block:{:?} state_root fail", + "verify legacy block:{:?} state_root fail", block_id, ); let block_gas_used = vec_transaction_info @@ -506,11 +792,8 @@ impl BlockChain { storage.save_block_transaction_ids(block_id, txn_id_vec)?; storage.save_block_txn_info_ids(block_id, txn_info_ids)?; storage.commit_block(block.clone())?; - storage.save_block_info(block_info.clone())?; - storage.save_table_infos(txn_table_infos)?; - watch(CHAIN_WATCH_NAME, "n26"); Ok(ExecutedBlock { block, block_info }) } @@ -522,6 +805,52 @@ impl BlockChain { pub fn get_block_accumulator(&self) -> &MerkleAccumulator { &self.block_accumulator } + + pub fn init_dag_with_genesis(&mut self, genesis: BlockHeader) -> Result<()> { + if self.is_dag_genesis(&genesis)? { + let _dag_genesis_id = genesis.id(); + self.dag.init_with_genesis(genesis)?; + } + Ok(()) + } + + pub fn get_block_dag_genesis(&self, header: &BlockHeader) -> Result { + let block_info = self + .storage + .get_block_info(header.id())? 
+ .ok_or_else(|| anyhow!("Cannot find block info by hash {:?}", header.id()))?; + let block_accumulator = MerkleAccumulator::new_with_info( + block_info.get_block_accumulator_info().clone(), + self.storage + .get_accumulator_store(AccumulatorStoreType::Block), + ); + let dag_genesis = block_accumulator + .get_leaf(self.dag_fork_height()?)? + .ok_or_else(|| anyhow!("failed to get the dag genesis"))?; + + Ok(dag_genesis) + } + + pub fn get_block_dag_origin(&self) -> Result { + let dag_genesis = self.get_block_dag_genesis(&self.current_header())?; + let block_header = self + .storage + .get_block_header_by_hash(dag_genesis)? + .ok_or_else(|| anyhow!("Cannot find block by hash {:?}", dag_genesis))?; + + Ok(HashValue::sha3_256_of( + &[block_header.parent_hash(), block_header.id()].encode()?, + )) + } + + pub fn get_dag_state_by_block(&self, header: &BlockHeader) -> Result<(HashValue, DagState)> { + let dag_genesis = self.get_block_dag_genesis(header)?; + Ok((dag_genesis, self.dag.get_dag_state(dag_genesis)?)) + } + + pub fn get_dag_genesis(&self) -> Result { + self.get_block_dag_genesis(&self.current_header()) + } } impl ChainReader for BlockChain { @@ -577,9 +906,7 @@ impl ChainReader for BlockChain { None => self.current_header().number(), Some(number) => number, }; - let num_leaves = self.block_accumulator.num_leaves(); - if end_num > num_leaves.saturating_sub(1) { bail!("Can not find block by number {}", end_num); }; @@ -715,12 +1042,15 @@ impl ChainReader for BlockChain { } else { None }; + BlockChain::new_with_uncles( self.time_service.clone(), head, uncles, self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), + //TODO: check missing blocks need to be clean ) } @@ -752,20 +1082,32 @@ impl ChainReader for BlockChain { } fn verify(&self, block: Block) -> Result { - FullVerifier::verify_block(self, block) + if self.is_dag(block.header())? { + DagBasicVerifier::verify_header(self, block.header())?; + Ok(VerifiedBlock(block)) + } else { + FullVerifier::verify_block(self, block) + } } - fn execute(&self, verified_block: VerifiedBlock) -> Result { - Self::execute_block_and_save( - self.storage.as_ref(), - self.statedb.fork(), - self.txn_accumulator.fork(None), - self.block_accumulator.fork(None), - &self.epoch, - Some(self.status.status.clone()), - verified_block.0, - self.vm_metrics.clone(), - ) + fn execute(&mut self, verified_block: VerifiedBlock) -> Result { + let header = verified_block.0.header().clone(); + if !self.is_dag(&header)? { + let executed = Self::execute_block_and_save( + self.storage.as_ref(), + self.statedb.fork(), + self.txn_accumulator.fork(None), + self.block_accumulator.fork(None), + &self.epoch, + Some(self.status.status.clone()), + verified_block.0, + self.vm_metrics.clone(), + )?; + self.init_dag_with_genesis(header)?; + Ok(executed) + } else { + self.execute_dag_block(verified_block) + } } fn get_transaction_infos( @@ -865,6 +1207,74 @@ impl ChainReader for BlockChain { state_proof, })) } + + fn current_tips_hash( + &self, + header: &BlockHeader, + ) -> Result)>> { + let (dag_genesis, dag_state) = self.get_dag_state_by_block(header)?; + Ok(Some((dag_genesis, dag_state.tips))) + } + + fn has_dag_block(&self, header_id: HashValue) -> Result { + let header = match self.storage.get_block_header_by_hash(header_id)? { + Some(header) => header, + None => return Ok(false), + }; + + let block_info = match self.storage.get_block_info(header.id())? 
{ + Some(block_info) => block_info, + None => return Ok(false), + }; + let block_accumulator = MerkleAccumulator::new_with_info( + block_info.get_block_accumulator_info().clone(), + self.storage + .get_accumulator_store(AccumulatorStoreType::Block), + ); + let dag_genesis = match block_accumulator.get_leaf(self.dag_fork_height()?)? { + Some(dag_genesis) => dag_genesis, + None => return Ok(false), + }; + + let current_chain_block_accumulator = MerkleAccumulator::new_with_info( + self.status.status.info.get_block_accumulator_info().clone(), + self.storage + .get_accumulator_store(AccumulatorStoreType::Block), + ); + let current_chain_dag_genesis = match current_chain_block_accumulator + .get_leaf(self.dag_fork_height()?)? + { + Some(dag_genesis) => dag_genesis, + None => return Ok(false), + }; + + if current_chain_dag_genesis != dag_genesis { + return Ok(false); + } + + self.dag.has_dag_block(header.id()) + } + + + fn dag_fork_height(&self) -> Result { + // try to handle db io error + match self.dag.block_dag_config() { + BlockDAGType::BlockDAGFormal => Ok(self + .statedb + .get_on_chain_config::()? + .map(|c| c.effective_height) + .unwrap_or(u64::MAX)), + BlockDAGType::BlockDAGTestMock(dag_mock_config) => Ok(dag_mock_config.fork_number), + } + } + + fn is_dag(&self, block_header: &BlockHeader) -> Result { + Ok(block_header.number() > self.dag_fork_height()?) + } + + fn is_dag_genesis(&self, block_header: &BlockHeader) -> Result { + Ok(block_header.number() == self.dag_fork_height()?) + } } impl BlockChain { @@ -968,6 +1378,75 @@ impl BlockChain { } Ok(event_with_infos) } + + fn connect_dag(&mut self, executed_block: ExecutedBlock) -> Result { + let dag = self.dag.clone(); + let (new_tip_block, _) = (executed_block.block(), executed_block.block_info()); + let (dag_genesis, mut tips) = self + .current_tips_hash(new_tip_block.header())? + .expect("tips should exist in dag"); + let parents = executed_block + .block + .header + .parents_hash() + .expect("Dag parents must exist"); + if !tips.contains(&new_tip_block.id()) { + for hash in parents { + tips.retain(|x| *x != hash); + } + if !dag.check_ancestor_of(new_tip_block.id(), tips.clone())? { + tips.push(new_tip_block.id()); + } + } + // Calculate the ghostdata of the virtual node created by all tips. + // The ghostdata.selected_parent of the tips becomes the latest head. + let block_hash = { + let ghost_of_tips = dag.ghostdata(tips.as_slice())?; + ghost_of_tips.selected_parent + }; + debug!( + "connect dag info block hash: {}, tips: {:?}", + block_hash, tips + ); + let (block, block_info) = { + let block = self + .storage + .get_block(block_hash)? + .expect("Dag block should exist"); + let block_info = self + .storage + .get_block_info(block_hash)? 
+ .expect("Dag block info should exist"); + (block, block_info) + }; + + let txn_accumulator_info = block_info.get_txn_accumulator_info(); + let block_accumulator_info = block_info.get_block_accumulator_info(); + let state_root = block.header().state_root(); + + self.txn_accumulator = info_2_accumulator( + txn_accumulator_info.clone(), + AccumulatorStoreType::Transaction, + self.storage.as_ref(), + ); + self.block_accumulator = info_2_accumulator( + block_accumulator_info.clone(), + AccumulatorStoreType::Block, + self.storage.as_ref(), + ); + + self.statedb = ChainStateDB::new(self.storage.clone().into_super_arc(), Some(state_root)); + + self.status = ChainStatusWithBlock { + status: ChainStatus::new(block.header().clone(), block_info.clone()), + head: block.clone(), + }; + if self.epoch.end_block_number() == block.header().number() { + self.epoch = get_epoch_from_statedb(&self.statedb)?; + } + self.dag.save_dag_state(dag_genesis, DagState { tips })?; + Ok(executed_block) + } } impl ChainWriter for BlockChain { @@ -976,8 +1455,15 @@ impl ChainWriter for BlockChain { } fn connect(&mut self, executed_block: ExecutedBlock) -> Result { + if self.is_dag(executed_block.block.header())? { + info!( + "connect a dag block, {:?}, number: {:?}", + executed_block.block.id(), + executed_block.block.header().number(), + ); + return self.connect_dag(executed_block); + } let (block, block_info) = (executed_block.block(), executed_block.block_info()); - debug_assert!(block.header().parent_hash() == self.status.status.head().id()); //TODO try reuse accumulator and state db. let txn_accumulator_info = block_info.get_txn_accumulator_info(); let block_accumulator_info = block_info.get_block_accumulator_info(); @@ -1011,7 +1497,11 @@ impl ChainWriter for BlockChain { } fn apply(&mut self, block: Block) -> Result { - self.apply_with_verifier::(block) + if !self.is_dag(block.header())? { + self.apply_with_verifier::(block) + } else { + self.apply_with_verifier::(block) + } } fn chain_state(&mut self) -> &ChainStateDB { diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index 5128715302..873f238a5a 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -8,7 +8,7 @@ use starcoin_chain_api::{ }; use starcoin_consensus::{Consensus, ConsensusVerifyError}; use starcoin_logger::prelude::debug; -use starcoin_types::block::{Block, BlockHeader, ALLOWED_FUTURE_BLOCKTIME}; +use starcoin_types::block::{Block, BlockHeader, LegacyBlockBody, ALLOWED_FUTURE_BLOCKTIME}; use std::{collections::HashSet, str::FromStr}; #[derive(Debug)] @@ -40,10 +40,15 @@ impl FromStr for Verifier { } pub struct StaticVerifier; + impl StaticVerifier { pub fn verify_body_hash(block: &Block) -> Result<()> { - //verify body - let body_hash = block.body.hash(); + // verify body + let body_hash = if block.is_legacy() { + LegacyBlockBody::from(block.body.clone()).hash() + } else { + block.body.hash() + }; verify_block!( VerifyBlockField::Body, body_hash == block.header().body_hash(), @@ -90,7 +95,11 @@ pub trait BlockVerifier { where R: ChainReader, { + if current_chain.is_dag(header)? { + return Ok(()); + } let epoch = current_chain.epoch(); + let is_legacy = header.is_legacy(); let switch_epoch = header.number() == epoch.end_block_number(); // epoch first block's uncles should empty. 
@@ -136,6 +145,21 @@ pub trait BlockVerifier {
                 "invalid block: block {} can not be uncle.",
                 uncle_id
             );
+
+            let valid_parents_hash = if is_legacy {
+                uncle.parents_hash().is_none()
+            } else {
+                uncle.parents_hash().unwrap_or_default().is_empty()
+            };
+
+            verify_block!(
+                VerifyBlockField::Uncle,
+                valid_parents_hash,
+                "uncle {} is not valid for a single-chain block, parents_hash len {}",
+                uncle.id(),
+                uncle.parents_hash().unwrap_or_default().len()
+            );
+
             debug!(
                 "verify_uncle header number {} hash {:?} uncle number {} hash {:?}",
                 header.number(),
@@ -249,6 +273,19 @@ impl BlockVerifier for BasicVerifier {
                 .get_accumulator_root(),
             new_block_header.block_accumulator_root(),
         );
+
+        verify_block!(
+            VerifyBlockField::Header,
+            !current_chain.is_dag(new_block_header)?
+                && new_block_header
+                    .parents_hash()
+                    .unwrap_or_default()
+                    .is_empty(),
+            "Single chain block is invalid: number {} fork_height {} parents_hash len {}",
+            new_block_header.number(),
+            current_chain.dag_fork_height()?,
+            new_block_header.parents_hash().unwrap_or_default().len()
+        );
         Ok(())
     }
 }
@@ -316,3 +353,121 @@ impl BlockVerifier for NoneVerifier {
         Ok(())
     }
 }
+
+//TODO: Implement it.
+pub struct DagVerifier;
+impl BlockVerifier for DagVerifier {
+    fn verify_header<R>(current_chain: &R, new_block_header: &BlockHeader) -> Result<()>
+    where
+        R: ChainReader,
+    {
+        let parents_hash = new_block_header.parents_hash().unwrap_or_default();
+        let mut parents_hash_to_check = parents_hash.clone();
+        parents_hash_to_check.sort();
+        parents_hash_to_check.dedup();
+
+        verify_block!(
+            VerifyBlockField::Header,
+            !parents_hash_to_check.is_empty() && parents_hash.len() == parents_hash_to_check.len(),
+            "Invalid parents_hash {:?} for a dag block {}, fork height {}",
+            new_block_header.parents_hash(),
+            new_block_header.number(),
+            current_chain.dag_fork_height()?,
+        );
+
+        verify_block!(
+            VerifyBlockField::Header,
+            parents_hash_to_check.contains(&new_block_header.parent_hash())
+                && current_chain
+                    .get_block_info(Some(new_block_header.parent_hash()))?
+                    .is_some(),
+            "Invalid block: parent {} might not exist.",
+            new_block_header.parent_hash()
+        );
+
+        ConsensusVerifier::verify_header(current_chain, new_block_header)
+    }
+
+    fn verify_uncles<R>(
+        _current_chain: &R,
+        _uncles: &[BlockHeader],
+        _header: &BlockHeader,
+    ) -> Result<()>
+    where
+        R: ChainReader,
+    {
+        // let mut uncle_ids = HashSet::new();
+        // for uncle in uncles {
+        //     let uncle_id = uncle.id();
+        //     verify_block!(
+        //         VerifyBlockField::Uncle,
+        //         !uncle_ids.contains(&uncle.id()),
+        //         "repeat uncle {:?} in current block {:?}",
+        //         uncle_id,
+        //         header.id()
+        //     );
+
+        //     if !header.is_dag() {
+        //         verify_block!(
+        //             VerifyBlockField::Uncle,
+        //             uncle.number() < header.number() ,
+        //             "uncle block number bigger than or equal to current block ,uncle block number is {} , current block number is {}", uncle.number(), header.number()
+        //         );
+        //     }
+
+        //     verify_block!(
+        //         VerifyBlockField::Uncle,
+        //         current_chain.get_block_info(Some(uncle_id))?.is_some(),
+        //         "Invalid block: uncle {} does not exist",
+        //         uncle_id
+        //     );
+
+        //     debug!(
+        //         "verify_uncle header number {} hash {:?} uncle number {} hash {:?}",
+        //         header.number(),
+        //         header.id(),
+        //         uncle.number(),
+        //         uncle.id()
+        //     );
+        //     uncle_ids.insert(uncle_id);
+        // }
+
+        Ok(())
+    }
+}
+
+//TODO: Implement it.
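+// DagBasicVerifier repeats DagVerifier's parents_hash and parent-existence checks
+// but skips the final ConsensusVerifier::verify_header call (left commented out at
+// the end of verify_header below), so already-mined headers are not re-checked for
+// difficulty; BlockChain::verify uses it for dag blocks.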
+pub struct DagBasicVerifier;
+impl BlockVerifier for DagBasicVerifier {
+    fn verify_header<R>(current_chain: &R, new_block_header: &BlockHeader) -> Result<()>
+    where
+        R: ChainReader,
+    {
+        let parents_hash = new_block_header.parents_hash().unwrap_or_default();
+        let mut parents_hash_to_check = parents_hash.clone();
+        parents_hash_to_check.sort();
+        parents_hash_to_check.dedup();
+
+        verify_block!(
+            VerifyBlockField::Header,
+            !parents_hash_to_check.is_empty() && parents_hash.len() == parents_hash_to_check.len(),
+            "Invalid parents_hash {:?} for a dag block {}, fork height {}",
+            new_block_header.parents_hash(),
+            new_block_header.number(),
+            current_chain.dag_fork_height()?,
+        );
+
+        verify_block!(
+            VerifyBlockField::Header,
+            parents_hash_to_check.contains(&new_block_header.parent_hash())
+                && current_chain
+                    .get_block_info(Some(new_block_header.parent_hash()))?
+                    .is_some(),
+            "Invalid block: parent {} might not exist.",
+            new_block_header.parent_hash()
+        );
+
+        Ok(())
+        // ConsensusVerifier::verify_header(current_chain, new_block_header)
+    }
+}
diff --git a/chain/tests/block_test_utils.rs b/chain/tests/block_test_utils.rs
index f6d7016c26..ecf1ed4ae2 100644
--- a/chain/tests/block_test_utils.rs
+++ b/chain/tests/block_test_utils.rs
@@ -6,6 +6,7 @@ use starcoin_accumulator::{Accumulator, MerkleAccumulator};
 use starcoin_chain::ChainWriter;
 use starcoin_config::{ChainNetwork, NodeConfig};
 use starcoin_crypto::HashValue;
+use starcoin_dag::blockdag::BlockDAG;
 use starcoin_executor::block_execute;
 use starcoin_genesis::Genesis;
 use starcoin_logger::prelude::*;
@@ -34,7 +35,8 @@ fn get_storage() -> impl Strategy {
 pub fn genesis_strategy(storage: Arc<Storage>) -> impl Strategy<Value = Block> {
     let net = &ChainNetwork::new_test();
     let genesis = Genesis::load_or_build(net).unwrap();
-    genesis.execute_genesis_block(net, storage).unwrap();
+    let dag = BlockDAG::create_for_testing().unwrap();
+    genesis.execute_genesis_block(net, storage, dag).unwrap();
     Just(genesis.block().clone())
 }
 
@@ -79,6 +81,7 @@ fn gen_header(
         parent_header.chain_id(),
         0,
         BlockHeaderExtra::new([0u8; 4]),
+        None,
     )
 }
 
diff --git a/chain/tests/test_block_chain.rs b/chain/tests/test_block_chain.rs
index 7b1d41411b..9f7bc6598d 100644
--- a/chain/tests/test_block_chain.rs
+++ b/chain/tests/test_block_chain.rs
@@ -1,7 +1,7 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use anyhow::Result;
+use anyhow::{Ok, Result};
 use starcoin_account_api::AccountInfo;
 use starcoin_accumulator::Accumulator;
 use starcoin_chain::BlockChain;
@@ -11,9 +11,10 @@ use starcoin_config::NodeConfig;
 use starcoin_config::{BuiltinNetworkID, ChainNetwork};
 use starcoin_consensus::Consensus;
 use starcoin_crypto::{ed25519::Ed25519PrivateKey, Genesis, PrivateKey};
+use starcoin_logger::prelude::debug;
 use starcoin_transaction_builder::{build_transfer_from_association, DEFAULT_EXPIRATION_TIME};
 use starcoin_types::account_address;
-use starcoin_types::block::{Block, BlockHeader};
+use starcoin_types::block::{Block, BlockHeader, TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG};
 use starcoin_types::filter::Filter;
 use starcoin_types::identifier::Identifier;
 use starcoin_types::language_storage::TypeTag;
@@ -31,7 +32,7 @@ fn test_chain_filter_events() {
     let event_type_tag = TypeTag::Struct(Box::new(StructTag {
         address: genesis_address(),
         module: Identifier::from_str("Block").unwrap(),
-        name: Identifier::from_str("NewBlockEvent").unwrap(),
+        name: Identifier::from_str("NewBlockEventV2").unwrap(),
         type_params: vec![],
     }));
 
@@ -140,10 +141,22 @@ fn test_block_chain() -> Result<()> {
     Ok(())
 }
 
+#[stest::test]
+fn test_block_chain_dag() -> Result<()> {
+    let mut mock_chain = MockChain::new_with_fork(ChainNetwork::new_test(), TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)?;
+    (0..10).into_iter().try_for_each(|index| {
+        let block = mock_chain.produce()?;
+        assert_eq!(block.header().number(), index + 1);
+        mock_chain.apply(block)?;
+        assert_eq!(mock_chain.head().current_header().number(), index + 1);
+        Ok(())
+    })
+}
+
 #[stest::test(timeout = 480)]
 fn test_halley_consensus() {
     let mut mock_chain =
-        MockChain::new(ChainNetwork::new_builtin(BuiltinNetworkID::Halley)).unwrap();
+        MockChain::new_with_fork(ChainNetwork::new_builtin(BuiltinNetworkID::Halley), TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG).unwrap();
     let times = 20;
     mock_chain.produce_and_apply_times(times).unwrap();
     assert_eq!(mock_chain.head().current_header().number(), times);
@@ -151,7 +164,7 @@
 
 #[stest::test(timeout = 240)]
 fn test_dev_consensus() {
-    let mut mock_chain = MockChain::new(ChainNetwork::new_builtin(BuiltinNetworkID::Dev)).unwrap();
+    let mut mock_chain = MockChain::new_with_fork(ChainNetwork::new_builtin(BuiltinNetworkID::Dev), TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG).unwrap();
     let times = 20;
     mock_chain.produce_and_apply_times(times).unwrap();
     assert_eq!(mock_chain.head().current_header().number(), times);
@@ -170,6 +183,19 @@ fn test_find_ancestor_genesis() -> Result<()> {
     Ok(())
 }
 
+#[stest::test]
+fn test_find_ancestor_genesis_dag() -> Result<()> {
+    let mut mock_chain = MockChain::new_with_fork(ChainNetwork::new_test(), TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)?;
+    mock_chain.produce_and_apply_times(10)?;
+
+    let mut mock_chain2 = MockChain::new(ChainNetwork::new_test())?;
+    mock_chain2.produce_and_apply_times(20)?;
+    let ancestor = mock_chain.head().find_ancestor(mock_chain2.head())?;
+    assert!(ancestor.is_some());
+    assert_eq!(ancestor.unwrap().number, 0);
+    Ok(())
+}
+
 #[stest::test]
 fn test_find_ancestor_fork() -> Result<()> {
     let mut mock_chain = MockChain::new(ChainNetwork::new_test())?;
@@ -177,8 +203,7 @@ fn test_find_ancestor_fork() -> Result<()> {
     let header = mock_chain.head().current_header();
     let mut mock_chain2 = mock_chain.fork(None)?;
     mock_chain.produce_and_apply_times(2)?;
-    mock_chain2.produce_and_apply_times(3)?;
-
+    mock_chain2.produce_and_apply_times(6)?;
     let ancestor = mock_chain.head().find_ancestor(mock_chain2.head())?;
     assert!(ancestor.is_some());
     assert_eq!(ancestor.unwrap().id, header.id());
@@ -206,8 +231,9 @@ fn gen_uncle() -> (MockChain, BlockChain, BlockHeader) {
 
 fn product_a_block(branch: &BlockChain, miner: &AccountInfo, uncles: Vec<BlockHeader>) -> Block {
     let (block_template, _) = branch
-        .create_block_template(*miner.address(), None, Vec::new(), uncles, None)
+        .create_block_template(*miner.address(), None, Vec::new(), uncles, None, None)
         .unwrap();
+
     branch
         .consensus()
         .create_block(block_template, branch.time_service().as_ref())
@@ -368,6 +394,7 @@ fn test_block_chain_txn_info_fork_mapping() -> Result<()> {
         vec![],
         vec![],
         None,
+        None,
     )?;
 
     let block_b1 = block_chain
@@ -398,6 +425,7 @@ fn test_block_chain_txn_info_fork_mapping() -> Result<()> {
         vec![signed_txn_t2.clone()],
         vec![],
         None,
+        None,
     )?;
     assert!(excluded.discarded_txns.is_empty(), "txn is discarded.");
     let block_b2 = block_chain
@@ -411,6 +439,7 @@ fn test_block_chain_txn_info_fork_mapping() -> Result<()> {
         vec![signed_txn_t2],
         vec![],
         None,
+        None,
     )?;
     assert!(excluded.discarded_txns.is_empty(), "txn is discarded.");
     let block_b3 = block_chain2
@@ -514,3 +543,24 @@ fn test_get_blocks_by_number() -> Result<()> {
     Ok(())
 }
+
+#[stest::test]
+fn test_block_chain_for_dag_fork() -> Result<()> {
+    let mut mock_chain = MockChain::new(ChainNetwork::new_test())?;
+
+    // generate the fork chain
+    mock_chain.produce_and_apply_times(3).unwrap();
+    let fork_id = mock_chain.head().current_header().id();
+
+    // create the dag chain
+    mock_chain.produce_and_apply_times(10).unwrap();
+
+    // create another dag chain at the fork point
+    let mut fork_block_chain = mock_chain.fork_new_branch(Some(fork_id)).unwrap();
+    for _ in 0..15 {
+        let block = product_a_block(&fork_block_chain, mock_chain.miner(), Vec::new());
+        fork_block_chain.apply(block)?;
+    }
+
+    Ok(())
+}
diff --git a/chain/tests/test_epoch_switch.rs b/chain/tests/test_epoch_switch.rs
index 48143c3e9f..fb07291aff 100644
--- a/chain/tests/test_epoch_switch.rs
+++ b/chain/tests/test_epoch_switch.rs
@@ -33,7 +33,7 @@ pub fn create_new_block(
     txns: Vec<SignedUserTransaction>,
 ) -> Result<Block> {
     let (template, _) =
-        chain.create_block_template(*account.address(), None, txns, vec![], None)?;
+        chain.create_block_template(*account.address(), None, txns, vec![], None, None)?;
     chain
         .consensus()
         .create_block(template, chain.time_service().as_ref())
@@ -198,6 +199,7 @@ pub fn modify_on_chain_config_by_dao_block(
         )?,
         vec![],
         None,
+        None,
     )?;
     let block1 = chain
         .consensus()
diff --git a/chain/tests/test_opened_block.rs b/chain/tests/test_opened_block.rs
index 33c922ba6b..121037ef5f 100644
--- a/chain/tests/test_opened_block.rs
+++ b/chain/tests/test_opened_block.rs
@@ -31,6 +31,8 @@ pub fn test_open_block() -> Result<()> {
             U256::from(0),
             chain.consensus(),
             None,
+            None,
+            None,
         )?
     };
 
diff --git a/chain/tests/test_txn_info_and_proof.rs b/chain/tests/test_txn_info_and_proof.rs
index d817366953..c9f4081bfd 100644
--- a/chain/tests/test_txn_info_and_proof.rs
+++ b/chain/tests/test_txn_info_and_proof.rs
@@ -8,14 +8,121 @@ use starcoin_consensus::Consensus;
 use starcoin_crypto::HashValue;
 use starcoin_logger::prelude::debug;
 use starcoin_transaction_builder::{peer_to_peer_txn_sent_as_association, DEFAULT_EXPIRATION_TIME};
+use starcoin_types::account_config;
+use starcoin_types::block::{BlockNumber, TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH};
 use starcoin_vm_types::access_path::AccessPath;
 use starcoin_vm_types::account_address::AccountAddress;
 use starcoin_vm_types::account_config::AccountResource;
 use starcoin_vm_types::move_resource::MoveResource;
+use starcoin_vm_types::state_view::StateReaderExt;
 use starcoin_vm_types::transaction::{SignedUserTransaction, Transaction};
 use std::collections::HashMap;
 use std::sync::Arc;
 
+pub fn gen_txns(seq_num: &mut u64) -> Result<Vec<SignedUserTransaction>> {
+    let mut rng = rand::thread_rng();
+    let txn_count: u64 = rng.gen_range(1..10);
+    let config = Arc::new(NodeConfig::random_for_test());
+    debug!("input seq:{}", *seq_num);
+    let txns: Vec<SignedUserTransaction> = (0..txn_count)
+        .map(|_txn_idx| {
+            let account_address = AccountAddress::random();
+
+            let txn = peer_to_peer_txn_sent_as_association(
+                account_address,
+                *seq_num,
+                10000,
+                config.net().time_service().now_secs() + DEFAULT_EXPIRATION_TIME,
+                config.net(),
+            );
+            *seq_num += 1;
+            txn
+        })
+        .collect();
+    Ok(txns)
+}
+
+fn transaction_info_and_proof_1(fork_number: BlockNumber) -> Result<()> {
+    let config = Arc::new(NodeConfig::random_for_test());
+    let mut block_chain = test_helper::gen_blockchain_for_dag_test(config.net(), fork_number)?;
+    let _current_header = block_chain.current_header();
+    let miner_account = AccountInfo::random();
+    let mut seq_num = 0;
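+    // Descriptive note: all generated transfers are signed by the association account,
+    // so its sequence number must advance across blocks; gen_txns bumps it per txn.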
+    (0..10).for_each(|_| {
+        let txns = gen_txns(&mut seq_num).unwrap();
+        let (template, _) = block_chain
+            .create_block_template(*miner_account.address(), None, txns, vec![], None, None)
+            .unwrap();
+        let block = block_chain
+            .consensus()
+            .create_block(template, config.net().time_service().as_ref())
+            .unwrap();
+        debug!("apply block:{:?}", &block);
+        if block.header().number() > fork_number {
+            assert!(block.header().parents_hash().map_or(false, |parents| !parents.is_empty()));
+        }
+        block_chain.apply(block).unwrap();
+    });
+    // fork from block 6
+    let fork_point = block_chain.get_block_by_number(6).unwrap().unwrap();
+    let fork_chain = block_chain.fork(fork_point.id()).unwrap();
+    let account_reader = fork_chain.chain_state_reader();
+    seq_num = account_reader.get_sequence_number(account_config::association_address())?;
+    let _txns = gen_txns(&mut seq_num).unwrap();
+    let (template, _) = fork_chain
+        .create_block_template(
+            *miner_account.address(),
+            Some(fork_point.header.id()),
+            vec![],
+            vec![],
+            None,
+            Some(vec![fork_point.id()]),
+        )
+        .unwrap();
+    let block = fork_chain
+        .consensus()
+        .create_block(template, config.net().time_service().as_ref())
+        .unwrap();
+    debug!("Apply block:{:?}", &block);
+    if block.header().number() > fork_number {
+        assert!(block_chain.apply(block).is_ok()); // a dag block will be executed even though it is not on the main chain
+    } else {
+        assert!(block_chain.apply(block).is_err()); // the block number is 7 while the chain head is at 10, so applying is expected to fail
+    }
+    assert_eq!(
+        block_chain.current_header().id(),
+        block_chain.get_block_by_number(10).unwrap().unwrap().id()
+    );
+    // create latest block
+    let account_reader = block_chain.chain_state_reader();
+    seq_num = account_reader.get_sequence_number(account_config::association_address())?;
+    let _txns = gen_txns(&mut seq_num).unwrap();
+    let (template, _) = block_chain
+        .create_block_template(*miner_account.address(), None, vec![], vec![], None, None)
+        .unwrap();
+    let block = block_chain
+        .consensus()
+        .create_block(template, config.net().time_service().as_ref())
+        .unwrap();
+    debug!("Apply latest block:{:?}", &block);
+    block_chain.apply(block).unwrap();
+    assert_eq!(
+        block_chain.current_header().id(),
+        block_chain.get_block_by_number(11).unwrap().unwrap().id()
+    );
+    Ok(())
+}
+
+#[stest::test(timeout = 480)]
+fn test_transaction_info_and_proof_1() -> Result<()> {
+    transaction_info_and_proof_1(TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH)
+}
+
+#[stest::test(timeout = 480)]
+fn test_dag_transaction_info_and_proof_1() -> Result<()> {
+    transaction_info_and_proof_1(TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)
+}
+
 #[stest::test(timeout = 480)]
 fn test_transaction_info_and_proof() -> Result<()> {
     let config = Arc::new(NodeConfig::random_for_test());
@@ -63,6 +170,7 @@ fn test_transaction_info_and_proof() -> Result<()> {
             txns.clone(),
             vec![],
             None,
+            None,
         )
         .unwrap();
 
@@ -70,6 +178,7 @@ fn test_transaction_info_and_proof() -> Result<()> {
         .consensus()
         .create_block(template, config.net().time_service().as_ref())
         .unwrap();
+    debug!("apply block:{:?}", &block);
     block_chain.apply(block.clone()).unwrap();
     all_txns.push(Transaction::BlockMetadata(
         block.to_metadata(current_header.gas_used()),
@@ -149,6 +258,5 @@ fn test_transaction_info_and_proof() -> Result<()> {
         );
         }
     }
-
     Ok(())
 }
diff --git a/cmd/db-exporter/Cargo.toml b/cmd/db-exporter/Cargo.toml
index ee4b2dfb82..873416333a 100644
--- a/cmd/db-exporter/Cargo.toml
+++ b/cmd/db-exporter/Cargo.toml
@@ -35,7 +35,7 @@ starcoin-vm-runtime = { workspace = true }
futures = { workspace = true } rayon = { workspace = true } num_cpus = { workspace = true } - +starcoin-dag ={ workspace = true } [package] authors = { workspace = true } edition = { workspace = true } diff --git a/cmd/db-exporter/src/main.rs b/cmd/db-exporter/src/main.rs index 98b9d02f2c..666afe87f9 100644 --- a/cmd/db-exporter/src/main.rs +++ b/cmd/db-exporter/src/main.rs @@ -20,6 +20,7 @@ use starcoin_chain::{ use starcoin_config::{BuiltinNetworkID, ChainNetwork, RocksdbConfig}; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; use starcoin_genesis::Genesis; use starcoin_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue, MoveValueAnnotator}; use starcoin_statedb::{ChainStateDB, ChainStateReader, ChainStateWriter}; @@ -628,14 +629,26 @@ pub fn export_block_range( Default::default(), None, )?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + from_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + let storage = Arc::new(Storage::new(StorageInstance::new_cache_and_db_instance( CacheStorage::new(None), db_storage, ))?); let (chain_info, _) = - Genesis::init_and_check_storage(&net, storage.clone(), from_dir.as_ref())?; - let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None) - .expect("create block chain should success."); + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + None, + dag, + ) + .expect("create block chain should success."); let cur_num = chain.status().head().number(); let end = if cur_num > end + BLOCK_GAP { end @@ -710,13 +723,20 @@ pub fn apply_block( CacheStorage::new(None), db_storage, ))?); - // StarcoinVM::set_concurrency_level_once(num_cpus::get()); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), to_dir.as_ref())?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + StarcoinVM::set_concurrency_level_once(num_cpus::get()); + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, ) .expect("create block chain should success."); let start_time = SystemTime::now(); @@ -789,12 +809,19 @@ pub fn startup_info_back( CacheStorage::new(None), db_storage, ))?); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), to_dir.as_ref())?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, ) .expect("create block chain should success."); @@ -834,12 +861,19 @@ pub fn gen_block_transactions( CacheStorage::new(None), db_storage, ))?); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), to_dir.as_ref())?; + 
let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, ) .expect("create block chain should success."); let block_num = block_num.unwrap_or(1000); @@ -856,6 +890,7 @@ pub fn gen_block_transactions( } } } + /// Returns a transaction to create a new account with the given arguments. pub fn create_account_txn_sent_as_association( new_account: &Account, @@ -933,7 +968,7 @@ pub fn execute_transaction_with_create_account( } let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; if block.transactions().len() as u64 <= trans_num { @@ -959,7 +994,7 @@ pub fn execute_transaction_with_miner_create_account( let miner_info = AccountInfo::from(&miner_account); let mut send_sequence = 0u64; let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, vec![], vec![], None)?; + chain.create_block_template(*miner_info.address(), None, vec![], vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; let block_hash = block.header.id(); @@ -984,7 +1019,7 @@ pub fn execute_transaction_with_miner_create_account( } let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; if block.transactions().len() as u64 <= trans_num { @@ -1011,7 +1046,7 @@ pub fn execute_empty_transaction_with_miner( let miner_info = AccountInfo::from(&miner_account); let mut send_sequence = 0u64; let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, vec![], vec![], None)?; + chain.create_block_template(*miner_info.address(), None, vec![], vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; let block_hash = block.header.id(); @@ -1034,7 +1069,7 @@ pub fn execute_empty_transaction_with_miner( } let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; if block.transactions().len() as u64 <= trans_num { @@ -1062,7 +1097,7 @@ pub fn execute_transaction_with_fixed_account( let mut send_sequence = 0u64; let receiver = Account::new(); let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, vec![], vec![], None)?; + chain.create_block_template(*miner_info.address(), None, vec![], vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; let block_hash = block.header.id(); @@ -1086,7 +1121,7 @@ pub fn execute_transaction_with_fixed_account( } let (block_template, _) = - 
chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; if block.transactions().len() as u64 <= trans_num { @@ -1145,7 +1180,7 @@ pub fn execute_turbo_stm_transaction_with_fixed_account( } let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; println!("create account trans {}", block.transactions().len()); @@ -1172,7 +1207,7 @@ pub fn execute_turbo_stm_transaction_with_fixed_account( } sequence += 1; let (block_template, _) = - chain.create_block_template(*miner_info.address(), None, txns, vec![], None)?; + chain.create_block_template(*miner_info.address(), None, txns, vec![], None, None)?; let block = ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?; println!("p2p trans {}", block.transactions().len()); @@ -1289,13 +1324,19 @@ pub fn export_snapshot( CacheStorage::new(None), db_storage, ))?); + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + from_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); let (chain_info, _) = - Genesis::init_and_check_storage(&net, storage.clone(), from_dir.as_ref())?; + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; let chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag.clone(), ) .expect("create block chain should success."); let block_num = chain.status().head().number(); @@ -1313,8 +1354,14 @@ pub fn export_snapshot( let cur_block = chain .get_block_by_number(cur_num)? 
.ok_or_else(|| format_err!("get block by number {} error", cur_num))?; - let chain = BlockChain::new(net.time_service(), cur_block.id(), storage.clone(), None) - .expect("create block chain should success."); + let chain = BlockChain::new( + net.time_service(), + cur_block.id(), + storage.clone(), + None, + dag, + ) + .expect("create block chain should success."); let cur_num = chain.epoch().start_block_number(); @@ -1629,14 +1676,21 @@ pub fn apply_snapshot( CacheStorage::new(None), db_storage, ))?); + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), to_dir.as_ref())?; + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let chain = Arc::new(std::sync::Mutex::new( BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, ) .expect("create block chain should success."), )); @@ -1964,12 +2018,19 @@ pub fn gen_turbo_stm_transactions(to_dir: PathBuf, block_num: Option) -> an CacheStorage::new(None), db_storage, ))?); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), to_dir.as_ref())?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, ) .expect("create block chain should success."); let block_num = block_num.unwrap_or(1000); @@ -1989,13 +2050,19 @@ pub fn apply_turbo_stm_block( CacheStorage::new(None), db_storage_seq, ))?); + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); let (chain_info_seq, _) = - Genesis::init_and_check_storage(&net, storage_seq.clone(), to_dir.as_ref())?; + Genesis::init_and_check_storage(&net, storage_seq.clone(), dag.clone(), to_dir.as_ref())?; let mut chain_seq = BlockChain::new( net.time_service(), chain_info_seq.head().id(), storage_seq.clone(), None, + dag, ) .expect("create block chain should success."); let cur_num = chain_seq.status().head().number(); @@ -2047,13 +2114,23 @@ pub fn apply_turbo_stm_block( CacheStorage::new(None), db_storage_stm, ))?); - let (chain_info_stm, _) = - Genesis::init_and_check_storage(&net, storage_stm.clone(), turbo_stm_to_dir.as_ref())?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + to_dir.join("dag/db/starcoindb"), + FlexiDagStorageConfig::new(), + )?; + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + let (chain_info_stm, _) = Genesis::init_and_check_storage( + &net, + storage_stm.clone(), + dag.clone(), + turbo_stm_to_dir.as_ref(), + )?; let mut chain_stm = BlockChain::new( net.time_service(), chain_info_stm.head().id(), storage_stm.clone(), None, + dag, ) .expect("create block chain should success."); diff --git a/cmd/generator/Cargo.toml b/cmd/generator/Cargo.toml index ef79c878a2..fc305543c7 100644 --- 
a/cmd/generator/Cargo.toml
+++ b/cmd/generator/Cargo.toml
@@ -21,7 +21,7 @@ starcoin-genesis = { workspace = true }
 starcoin-logger = { workspace = true }
 starcoin-storage = { workspace = true }
 starcoin-types = { workspace = true }
-
+starcoin-dag = { workspace = true }
 [features]
 default = []
diff --git a/cmd/generator/src/gen_data.rs b/cmd/generator/src/gen_data.rs
index 98c2be7873..78e59e4f11 100644
--- a/cmd/generator/src/gen_data.rs
+++ b/cmd/generator/src/gen_data.rs
@@ -46,7 +46,7 @@ impl CommandAction for GenDataCommand {
     ) -> Result<Self::ReturnItem> {
         let opt = ctx.opt();
         let global_opt = ctx.global_opt();
-        let (config, storage, chain_info, account) = init_or_load_data_dir(global_opt, None)?;
+        let (config, storage, chain_info, account, dag) = init_or_load_data_dir(global_opt, None)?;
         if chain_info.head().id() != chain_info.genesis_hash() {
             warn!("start block is not genesis.")
         }
@@ -56,6 +56,7 @@ impl CommandAction for GenDataCommand {
             storage.clone(),
             chain_info.head().id(),
             account,
+            dag,
         )?;
         let mut latest_header = mock_chain.head().current_header();
         for i in 0..opt.count {
diff --git a/cmd/generator/src/gen_genesis.rs b/cmd/generator/src/gen_genesis.rs
index da971417a6..439d9dc931 100644
--- a/cmd/generator/src/gen_genesis.rs
+++ b/cmd/generator/src/gen_genesis.rs
@@ -48,7 +48,7 @@ impl CommandAction for GenGenesisCommand {
         if global_opt.base_data_dir.is_none() {
             warn!("data_dir option is none, use default data_dir.")
         }
-        let (config, .., chain_info, account) =
+        let (config, .., chain_info, account, _) =
             init_or_load_data_dir(global_opt, opt.password.clone())?;
         Ok(GenGenesisResult {
             net: config.net().id().clone(),
diff --git a/cmd/generator/src/lib.rs b/cmd/generator/src/lib.rs
index f884a986a1..4b3dc3d1bd 100644
--- a/cmd/generator/src/lib.rs
+++ b/cmd/generator/src/lib.rs
@@ -6,6 +6,7 @@ use starcoin_account::account_storage::AccountStorage;
 use starcoin_account::AccountManager;
 use starcoin_account_api::AccountInfo;
 use starcoin_config::{NodeConfig, StarcoinOpt};
+use starcoin_dag::blockdag::BlockDAG;
 use starcoin_genesis::Genesis;
 use starcoin_storage::cache_storage::CacheStorage;
 use starcoin_storage::db_storage::DBStorage;
@@ -22,7 +23,7 @@ pub mod gen_genesis_config;
 pub fn init_or_load_data_dir(
     global_opt: &StarcoinOpt,
     password: Option<String>,
-) -> Result<(NodeConfig, Arc<Storage>, ChainInfo, AccountInfo)> {
+) -> Result<(NodeConfig, Arc<Storage>, ChainInfo, AccountInfo, BlockDAG)> {
     let config = NodeConfig::load_with_opt(global_opt)?;
     if config.base().base_data_dir().is_temp() {
         bail!("Please set data_dir option.")
@@ -31,8 +32,17 @@ pub fn init_or_load_data_dir(
         CacheStorage::new_with_capacity(config.storage.cache_size(), None),
         DBStorage::new(config.storage.dir(), config.storage.rocksdb_config(), None)?,
     ))?);
-    let (chain_info, _genesis) =
-        Genesis::init_and_check_storage(config.net(), storage.clone(), config.data_dir())?;
+    let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path(
+        config.storage.dag_dir(),
+        config.storage.clone().into(),
+    )?;
+    let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage);
+    let (chain_info, _genesis) = Genesis::init_and_check_storage(
+        config.net(),
+        storage.clone(),
+        dag.clone(),
+        config.data_dir(),
+    )?;
     let vault_config = &config.vault;
     let account_storage =
         AccountStorage::create_from_path(vault_config.dir(), config.storage.rocksdb_config())?;
@@ -43,5 +53,5 @@ pub fn init_or_load_data_dir(
             .create_account(&password.unwrap_or_default())?
.info(), }; - Ok((config, storage, chain_info, account)) + Ok((config, storage, chain_info, account, dag)) } diff --git a/cmd/peer-watcher/Cargo.toml b/cmd/peer-watcher/Cargo.toml index b47252658c..b2cd3b7e03 100644 --- a/cmd/peer-watcher/Cargo.toml +++ b/cmd/peer-watcher/Cargo.toml @@ -18,7 +18,7 @@ starcoin-network = { workspace = true } starcoin-storage = { workspace = true } starcoin-types = { workspace = true } bcs-ext = { package = "bcs-ext", workspace = true } - +starcoin-dag = {workspace = true} [package] authors = { workspace = true } edition = { workspace = true } diff --git a/cmd/peer-watcher/src/lib.rs b/cmd/peer-watcher/src/lib.rs index 0defa9ba3e..4c940d1d48 100644 --- a/cmd/peer-watcher/src/lib.rs +++ b/cmd/peer-watcher/src/lib.rs @@ -5,19 +5,20 @@ use anyhow::Result; use network_p2p::NetworkWorker; use network_types::peer_info::PeerInfo; use starcoin_config::{ChainNetwork, NetworkConfig}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_network::network_p2p_handle::Networkp2pHandle; use starcoin_network::{build_network_worker, NotificationMessage}; use starcoin_storage::storage::StorageInstance; use starcoin_storage::Storage; use std::sync::Arc; - pub fn build_lighting_network( net: &ChainNetwork, network_config: &NetworkConfig, ) -> Result<(PeerInfo, NetworkWorker)> { let genesis = starcoin_genesis::Genesis::load_or_build(net)?; let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); - let chain_info = genesis.execute_genesis_block(net, storage)?; + let chain_info = + genesis.execute_genesis_block(net, storage, BlockDAG::create_for_testing()?)?; build_network_worker( network_config, chain_info, diff --git a/cmd/replay/Cargo.toml b/cmd/replay/Cargo.toml index fcdd912220..e8301f9075 100644 --- a/cmd/replay/Cargo.toml +++ b/cmd/replay/Cargo.toml @@ -13,7 +13,7 @@ starcoin-logger = { workspace = true } starcoin-storage = { workspace = true } starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } - +starcoin-dag = {workspace = true} [package] authors = { workspace = true } edition = { workspace = true } diff --git a/cmd/replay/src/main.rs b/cmd/replay/src/main.rs index d391c78fa3..896d0c2f98 100644 --- a/cmd/replay/src/main.rs +++ b/cmd/replay/src/main.rs @@ -78,10 +78,19 @@ fn main() -> anyhow::Result<()> { )) .unwrap(), ); - let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), from_dir.as_ref()) - .expect("init storage by genesis fail."); - let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None) - .expect("create block chain should success."); + //TODO:FIXME + let dag = starcoin_dag::blockdag::BlockDAG::create_for_testing().unwrap(); + let (chain_info, _) = + Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref()) + .expect("init storage by genesis fail."); + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + None, + dag.clone(), + ) + .expect("create block chain should success."); let storage2 = Arc::new( Storage::new(StorageInstance::new_cache_and_db_instance( @@ -90,14 +99,16 @@ fn main() -> anyhow::Result<()> { )) .unwrap(), ); - let (chain_info2, _) = Genesis::init_and_check_storage(&net, storage2.clone(), to_dir.as_ref()) - .expect("init storage by genesis fail."); + let (chain_info2, _) = + Genesis::init_and_check_storage(&net, storage2.clone(), dag.clone(), to_dir.as_ref()) + .expect("init storage by genesis fail."); let mut chain2 = BlockChain::new( net.time_service(), 
chain_info2.status().head().id(),
         storage2.clone(),
         None,
+        dag,
     )
     .expect("create block chain should success.");
 
diff --git a/cmd/starcoin/Cargo.toml b/cmd/starcoin/Cargo.toml
index 58e7ea8899..1945c9cfc9 100644
--- a/cmd/starcoin/Cargo.toml
+++ b/cmd/starcoin/Cargo.toml
@@ -53,6 +53,8 @@ stdlib = { workspace = true }
 tokio = { features = ["full"], workspace = true }
 vm-status-translator = { workspace = true }
 num_cpus = { workspace = true }
+starcoin-flexidag = { workspace = true }
+starcoin-dag = { workspace = true }
 
 [dev-dependencies]
 stest = { workspace = true }
diff --git a/cmd/starcoin/src/chain/get_dag_state_cmd.rs b/cmd/starcoin/src/chain/get_dag_state_cmd.rs
new file mode 100644
index 0000000000..90d5fb00dc
--- /dev/null
+++ b/cmd/starcoin/src/chain/get_dag_state_cmd.rs
@@ -0,0 +1,30 @@
+// Copyright (c) The Starcoin Core Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::cli_state::CliState;
+use crate::StarcoinOpt;
+use anyhow::Result;
+use clap::Parser;
+use scmd::{CommandAction, ExecContext};
+use starcoin_dag::consensusdb::consenses_state::DagStateView;
+
+/// Get the current dag state of the chain
+#[derive(Debug, Parser)]
+#[clap(name = "get-dag-state", alias = "get_dag_state")]
+pub struct GetDagStateOpt {}
+
+pub struct GetDagStateCommand;
+
+impl CommandAction for GetDagStateCommand {
+    type State = CliState;
+    type GlobalOpt = StarcoinOpt;
+    type Opt = GetDagStateOpt;
+    type ReturnItem = DagStateView;
+
+    fn run(
+        &self,
+        ctx: &ExecContext<Self::State, Self::GlobalOpt, Self::Opt>,
+    ) -> Result<Self::ReturnItem> {
+        ctx.state().client().get_dag_state()
+    }
+}
diff --git a/cmd/starcoin/src/chain/mod.rs b/cmd/starcoin/src/chain/mod.rs
index 0049c527ee..7affff217a 100644
--- a/cmd/starcoin/src/chain/mod.rs
+++ b/cmd/starcoin/src/chain/mod.rs
@@ -4,6 +4,7 @@
 mod epoch_info;
 mod get_block_cmd;
 mod get_block_info_cmd;
+mod get_dag_state_cmd;
 mod get_events_cmd;
 mod get_txn_cmd;
 mod get_txn_info_cmd;
@@ -23,3 +24,4 @@ pub use get_txn_info_list_cmd::*;
 pub use get_txn_infos_cmd::*;
 pub use info_cmd::*;
 pub use list_block_cmd::*;
+pub use get_dag_state_cmd::*;
diff --git a/cmd/starcoin/src/lib.rs b/cmd/starcoin/src/lib.rs
index 7b6a9c367f..bc2114cc75 100644
--- a/cmd/starcoin/src/lib.rs
+++ b/cmd/starcoin/src/lib.rs
@@ -102,7 +102,8 @@ pub fn add_command(
             .subcommand(chain::EpochInfoCommand)
             .subcommand(chain::GetTransactionInfoListCommand)
             .subcommand(chain::get_txn_proof_cmd::GetTransactionProofCommand)
-            .subcommand(chain::GetBlockInfoCommand),
+            .subcommand(chain::GetBlockInfoCommand)
+            .subcommand(chain::GetDagStateCommand),
     )
     .command(
         CustomCommand::with_name("txpool")
diff --git a/commons/stream-task/src/collector.rs b/commons/stream-task/src/collector.rs
index 3e597fce95..cd0e317bbd 100644
--- a/commons/stream-task/src/collector.rs
+++ b/commons/stream-task/src/collector.rs
@@ -15,7 +15,7 @@ use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
 use thiserror::Error;
 
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum CollectorState {
     /// Collector is enough, do not feed more item, finish task.
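     /// (`PartialEq` is derived above so callers can compare collector states
     /// directly, e.g. `state == CollectorState::Enough`.)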
     Enough,
diff --git a/config/src/available_port.rs b/config/src/available_port.rs
index 588b28ad81..f03bf1af60 100644
--- a/config/src/available_port.rs
+++ b/config/src/available_port.rs
@@ -57,7 +57,7 @@ fn get_ephemeral_port() -> ::std::io::Result<u16> {
     use std::net::{TcpListener, TcpStream};
 
     // Request a random available port from the OS
-    let listener = TcpListener::bind(("localhost", 0))?;
+    let listener = TcpListener::bind(("127.0.0.1", 0))?;
     let addr = listener.local_addr()?;
 
     // Create and accept a connection (which we'll promptly drop) in order to force the port
diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs
index f553cb5013..b5dfbec727 100644
--- a/config/src/genesis_config.rs
+++ b/config/src/genesis_config.rs
@@ -693,7 +693,7 @@ static G_DEFAULT_BASE_BLOCK_DIFF_WINDOW: u64 = 24;
 static G_BASE_REWARD_PER_UNCLE_PERCENT: u64 = 10;
 static G_MIN_BLOCK_TIME_TARGET: u64 = 5000;
 static G_MAX_BLOCK_TIME_TARGET: u64 = 60000;
-static G_BASE_MAX_UNCLES_PER_BLOCK: u64 = 2;
+pub static G_BASE_MAX_UNCLES_PER_BLOCK: u64 = 2;
 
 pub static G_TOTAL_STC_AMOUNT: Lazy<TokenValue<STCUnit>> =
     Lazy::new(|| STCUnit::STC.value_of(3185136000));
diff --git a/config/src/lib.rs b/config/src/lib.rs
index f15728e93e..84cf9b1ad7 100644
--- a/config/src/lib.rs
+++ b/config/src/lib.rs
@@ -472,6 +472,15 @@ impl NodeConfig {
         Self::load_with_opt(&opt).expect("Auto generate test config should success.")
     }
 
+    pub fn proxima_for_test(dir: PathBuf) -> Self {
+        let opt = StarcoinOpt {
+            net: Some(BuiltinNetworkID::Proxima.into()),
+            base_data_dir: Some(dir),
+            ..StarcoinOpt::default()
+        };
+        Self::load_with_opt(&opt).expect("Auto generate proxima config should success.")
+    }
+
     pub fn customize_for_test() -> Self {
         let opt = StarcoinOpt {
             net: Some(BuiltinNetworkID::Test.into()),
diff --git a/config/src/storage_config.rs b/config/src/storage_config.rs
index 38634026e0..e53fff65f1 100644
--- a/config/src/storage_config.rs
+++ b/config/src/storage_config.rs
@@ -34,6 +34,13 @@ pub struct RocksdbConfig {
     pub wal_bytes_per_sync: u64,
     #[clap(name = "rocksdb-bytes-per-sync", long, help = "rocksdb bytes per sync")]
     pub bytes_per_sync: u64,
+
+    #[clap(
+        name = "rocksdb-parallelism",
+        long,
+        help = "rocksdb background threads, defaults to one"
+    )]
+    pub parallelism: u64,
 }
 
 impl RocksdbConfig {
@@ -61,11 +68,14 @@ impl Default for RocksdbConfig {
             bytes_per_sync: 1u64 << 20,
             // For wal sync every size to be 1MB
             wal_bytes_per_sync: 1u64 << 20,
+            // For background threads
+            parallelism: 1u64,
         }
     }
 }
 
 static G_DEFAULT_DB_DIR: Lazy<PathBuf> = Lazy::new(|| PathBuf::from("starcoindb/db"));
+static G_DEFAULT_DAG_DB_DIR: Lazy<PathBuf> = Lazy::new(|| PathBuf::from("dag/db"));
 pub const DEFAULT_CACHE_SIZE: usize = 20000;
 
 #[derive(Clone, Default, Debug, Deserialize, PartialEq, Serialize, Parser)]
@@ -102,6 +112,14 @@ pub struct StorageConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     #[clap(name = "rocksdb-bytes-per-sync", long, help = "rocksdb bytes per sync")]
     pub bytes_per_sync: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[clap(
+        name = "rocksdb-parallelism",
+        long,
+        help = "rocksdb background threads"
+    )]
+    pub parallelism: Option<u64>,
 }
 
 impl StorageConfig {
@@ -112,7 +130,9 @@ impl StorageConfig {
     pub fn dir(&self) -> PathBuf {
         self.base().data_dir().join(G_DEFAULT_DB_DIR.as_path())
     }
-
+    pub fn dag_dir(&self) -> PathBuf {
+        self.base().data_dir().join(G_DEFAULT_DAG_DB_DIR.as_path())
+    }
     pub fn rocksdb_config(&self) -> RocksdbConfig {
         let default = RocksdbConfig::default();
         RocksdbConfig {
@@ -124,6 +144,7 @@ impl StorageConfig
{ wal_bytes_per_sync: self .wal_bytes_per_sync .unwrap_or(default.wal_bytes_per_sync), + parallelism: self.parallelism.unwrap_or(default.parallelism), } } pub fn cache_size(&self) -> usize { diff --git a/consensus/src/consensus_test.rs b/consensus/src/consensus_test.rs index 0bf608fc3f..2c38d31388 100644 --- a/consensus/src/consensus_test.rs +++ b/consensus/src/consensus_test.rs @@ -91,6 +91,7 @@ fn verify_header_test_barnard_block3_ubuntu22() { ChainId::new(251), 2894404328, BlockHeaderExtra::new([0u8; 4]), + None, ); G_CRYPTONIGHT .verify_header_difficulty(header.difficulty(), &header) diff --git a/etc/starcoin_types.yml b/etc/starcoin_types.yml index ea11e85123..34cfe67cd0 100644 --- a/etc/starcoin_types.yml +++ b/etc/starcoin_types.yml @@ -47,6 +47,10 @@ BlockMetadata: - chain_id: TYPENAME: ChainId - parent_gas_used: U64 + - parents_hash: + OPTION: + SEQ: + TYPENAME: HashValue ChainId: STRUCT: - id: U8 diff --git a/executor/tests/executor_test.rs b/executor/tests/executor_test.rs index 9b057d9b24..1747d1f7a4 100644 --- a/executor/tests/executor_test.rs +++ b/executor/tests/executor_test.rs @@ -83,6 +83,34 @@ fn test_vm_version() { assert_eq!(readed_version, version); } +#[stest::test] +fn test_flexidag_config_get() { + let (chain_state, _net) = prepare_genesis(); + + let version_module_id = ModuleId::new( + genesis_address(), + Identifier::new("FlexiDagConfig").unwrap(), + ); + let mut value = starcoin_dev::playground::call_contract( + &chain_state, + version_module_id, + "effective_height", + vec![], + vec![TransactionArgument::Address(genesis_address())], + None, + ) + .unwrap(); + + let read_version: u64 = bcs_ext::from_bytes(&value.pop().unwrap().1).unwrap(); + let version = { + let mut vm = StarcoinVM::new(None); + vm.load_configs(&chain_state).unwrap(); + vm.get_flexidag_config().unwrap().effective_height + }; + + assert_eq!(read_version, version); +} + #[stest::test] fn test_consensus_config_get() -> Result<()> { let (chain_state, _net) = prepare_genesis(); diff --git a/executor/tests/module_upgrade_test.rs b/executor/tests/module_upgrade_test.rs index c9d4e949b6..e8bc8c4318 100644 --- a/executor/tests/module_upgrade_test.rs +++ b/executor/tests/module_upgrade_test.rs @@ -18,7 +18,9 @@ use starcoin_vm_types::account_config::{association_address, core_code_address, use starcoin_vm_types::account_config::{genesis_address, stc_type_tag}; use starcoin_vm_types::genesis_config::{ChainId, StdlibVersion}; use starcoin_vm_types::move_resource::MoveResource; -use starcoin_vm_types::on_chain_config::{MoveLanguageVersion, TransactionPublishOption, Version}; +use starcoin_vm_types::on_chain_config::{ + FlexiDagConfig, MoveLanguageVersion, TransactionPublishOption, Version, +}; use starcoin_vm_types::on_chain_resource::LinearWithdrawCapability; use starcoin_vm_types::state_store::state_key::StateKey; use starcoin_vm_types::token::stc::G_STC_TOKEN_CODE; @@ -28,7 +30,8 @@ use std::fs::File; use std::io::Read; use stdlib::{load_upgrade_package, StdlibCompat, G_STDLIB_VERSIONS}; use test_helper::dao::{ - dao_vote_test, execute_script_on_chain_config, on_chain_config_type_tag, vote_language_version, + dao_vote_test, execute_script_on_chain_config, on_chain_config_type_tag, vote_flexi_dag_config, + vote_language_version, }; use test_helper::executor::*; use test_helper::starcoin_dao; @@ -113,7 +116,7 @@ fn test_init_script() -> Result<()> { } #[stest::test] -fn test_upgrade_stdlib_with_incremental_package() -> Result<()> { +fn test_stdlib_upgrade_with_incremental_package() -> Result<()> { let 
alice = Account::new();
     let mut genesis_config = BuiltinNetworkID::Test.genesis_config().clone();
     genesis_config.stdlib_version = StdlibVersion::Version(1);
@@ -196,6 +199,7 @@ fn test_stdlib_upgrade() -> Result<()> {
     let alice = Account::new();
 
     for new_version in stdlib_versions.into_iter().skip(1) {
+        debug!("=== upgrading {current_version} to {new_version}");
         // if upgrade from 7 to later, we need to update language version to 3.
         if let StdlibVersion::Version(7) = current_version {
             dao_vote_test(
@@ -235,6 +239,18 @@ fn test_stdlib_upgrade() -> Result<()> {
             )?;
             proposal_id += 1;
         }
+        if let StdlibVersion::Version(13) = current_version {
+            dao_vote_test(
+                &alice,
+                &chain_state,
+                &net,
+                vote_flexi_dag_config(&net, 1234567890u64),
+                on_chain_config_type_tag(FlexiDagConfig::type_tag()),
+                execute_script_on_chain_config(&net, FlexiDagConfig::type_tag(), proposal_id),
+                proposal_id,
+            )?;
+            proposal_id += 1;
+        }
         verify_version_state(current_version, &chain_state)?;
         let dao_action_type_tag = new_version.upgrade_module_type_tag();
         let package = match load_upgrade_package(current_version, new_version)? {
@@ -244,6 +260,7 @@ fn test_stdlib_upgrade() -> Result<()> {
                     "{:?} is same as {:?}, continue",
                     current_version, new_version
                 );
+                ext_execute_after_upgrade(new_version, &net, &chain_state)?;
                 continue;
             }
         };
@@ -458,6 +475,12 @@ fn ext_execute_after_upgrade(
                 "expect 0x1::GenesisNFT::GenesisNFTInfo in global storage, but go none."
             );
         }
+        StdlibVersion::Version(12) => {
+            let version_resource = chain_state.get_on_chain_config::<MoveLanguageVersion>()?;
+            assert!(version_resource.is_some());
+            let version = version_resource.unwrap();
+            assert_eq!(version.major, 6, "expect language version is 6");
+        }
 
         // this is old daospace-v12 starcoin-framework,
         // https://github.com/starcoinorg/starcoin-framework/releases/tag/daospace-v12
@@ -695,6 +718,15 @@ where
                 "expect LinearWithdrawCapability exist at association_address"
             );
         }
+        StdlibVersion::Version(13) => {
+            let config = chain_state.get_on_chain_config::<FlexiDagConfig>()?;
+            assert!(config.is_some());
+            assert_eq!(
+                config.unwrap().effective_height,
+                1234567890,
+                "expect dag effective height is 1234567890"
+            );
+        }
         _ => {
             //do nothing.
} diff --git a/flexidag/Cargo.toml b/flexidag/Cargo.toml new file mode 100644 index 0000000000..f45a263f7e --- /dev/null +++ b/flexidag/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "starcoin-flexidag" +authors = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +version = "1.13.8" +homepage = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +starcoin-config = { workspace = true } +starcoin-crypto = { workspace = true } +starcoin-logger = { workspace = true } +starcoin-service-registry = { workspace = true } +starcoin-storage = { workspace = true } +starcoin-types = { workspace = true } +tokio = { workspace = true } +starcoin-consensus = { workspace = true } +starcoin-accumulator = { workspace = true } +thiserror = { workspace = true } +starcoin-dag = { workspace = true } +bcs-ext = { workspace = true } diff --git a/flexidag/dag/Cargo.toml b/flexidag/dag/Cargo.toml new file mode 100644 index 0000000000..9f33035e49 --- /dev/null +++ b/flexidag/dag/Cargo.toml @@ -0,0 +1,51 @@ +[dependencies] +anyhow = { workspace = true } +byteorder = { workspace = true } +cryptonight-rs = { workspace = true } +futures = { workspace = true } +hex = { default-features = false, workspace = true } +once_cell = { workspace = true } +proptest = { default-features = false, optional = true, workspace = true } +proptest-derive = { default-features = false, optional = true, workspace = true } +rand = { workspace = true } +rand_core = { default-features = false, workspace = true } +rust-argon2 = { workspace = true } +sha3 = { workspace = true } +starcoin-crypto = { workspace = true } +starcoin-logger = { workspace = true } +starcoin-state-api = { workspace = true } +starcoin-time-service = { workspace = true } +starcoin-types = { workspace = true } +starcoin-vm-types = { workspace = true } +thiserror = { workspace = true } +rocksdb = { workspace = true } +bincode = { version = "1", default-features = false } +serde = { workspace = true } +starcoin-storage = { workspace = true } +parking_lot = { workspace = true } +itertools = { workspace = true } +starcoin-config = { workspace = true } +bcs-ext = { workspace = true } +starcoin-accumulator = { workspace = true } +schemars = { workspace = true } + +[dev-dependencies] +proptest = { workspace = true } +proptest-derive = { workspace = true } +stest = { workspace = true } +tempfile = { workspace = true } +tokio = {workspace = true } +[features] +default = [] +fuzzing = ["proptest", "proptest-derive", "starcoin-types/fuzzing"] + +[package] +authors = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +name = "starcoin-dag" +publish = { workspace = true } +version = "1.13.8" +homepage = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } diff --git a/flexidag/dag/src/block_dag_config.rs b/flexidag/dag/src/block_dag_config.rs new file mode 100644 index 0000000000..346f124990 --- /dev/null +++ b/flexidag/dag/src/block_dag_config.rs @@ -0,0 +1,12 @@ +use starcoin_types::block::BlockNumber; + +#[derive(Clone, Debug)] +pub struct BlockDAGConfigMock { + pub fork_number: BlockNumber, +} + +#[derive(Clone, Debug)] +pub enum BlockDAGType { + BlockDAGFormal, + 
BlockDAGTestMock(BlockDAGConfigMock),
+}
diff --git a/flexidag/dag/src/blockdag.rs b/flexidag/dag/src/blockdag.rs
new file mode 100644
index 0000000000..22090345db
--- /dev/null
+++ b/flexidag/dag/src/blockdag.rs
@@ -0,0 +1,430 @@
+use super::reachability::{inquirer, reachability_service::MTReachabilityService};
+use super::types::ghostdata::GhostdagData;
+use crate::block_dag_config::{BlockDAGConfigMock, BlockDAGType};
+use crate::consensusdb::consenses_state::{DagState, DagStateReader, DagStateStore};
+use crate::consensusdb::prelude::{FlexiDagStorageConfig, StoreError};
+use crate::consensusdb::schemadb::{GhostdagStoreReader, ReachabilityStore, REINDEX_ROOT_KEY};
+use crate::consensusdb::{
+    prelude::FlexiDagStorage,
+    schemadb::{
+        DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore,
+        HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader,
+    },
+};
+use crate::ghostdag::protocol::GhostdagManager;
+use crate::{process_key_already_error, reachability};
+use anyhow::{anyhow, bail, Ok};
+use bcs_ext::BCSCodec;
+use parking_lot::RwLock;
+use starcoin_config::{temp_dir, RocksdbConfig};
+use starcoin_crypto::{HashValue as Hash, HashValue};
+use starcoin_logger::prelude::info;
+use starcoin_types::block::{BlockHeader, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH};
+use starcoin_types::{
+    blockhash::{BlockHashes, KType},
+    consensus_header::ConsensusHeader,
+};
+use std::path::Path;
+use std::sync::Arc;
+
+pub type DbGhostdagManager = GhostdagManager<
+    DbGhostdagStore,
+    DbRelationsStore,
+    MTReachabilityService<DbReachabilityStore>,
+    DbHeadersStore,
+>;
+
+#[derive(Clone)]
+pub struct BlockDAG {
+    pub storage: FlexiDagStorage,
+    ghostdag_manager: DbGhostdagManager,
+    dag_config: BlockDAGType,
+}
+
+impl BlockDAG {
+    pub fn new_with_type(k: KType, db: FlexiDagStorage, dag_config: BlockDAGType) -> Self {
+        let ghostdag_store = db.ghost_dag_store.clone();
+        let header_store = db.header_store.clone();
+        let relations_store = db.relations_store.clone();
+        let reachability_store = db.reachability_store.clone();
+        let reachability_service =
+            MTReachabilityService::new(Arc::new(RwLock::new(reachability_store)));
+        let ghostdag_manager = DbGhostdagManager::new(
+            k,
+            ghostdag_store,
+            relations_store,
+            header_store,
+            reachability_service,
+        );
+
+        Self {
+            ghostdag_manager,
+            storage: db,
+            dag_config,
+        }
+    }
+
+    pub fn new(k: KType, db: FlexiDagStorage) -> Self {
+        Self::new_with_type(k, db, BlockDAGType::BlockDAGFormal)
+    }
+
+    pub fn create_for_testing() -> anyhow::Result<Self> {
+        let dag_storage =
+            FlexiDagStorage::create_from_path(temp_dir(), FlexiDagStorageConfig::default())?;
+        Ok(BlockDAG::new_with_type(
+            8,
+            dag_storage,
+            BlockDAGType::BlockDAGTestMock(BlockDAGConfigMock {
+                fork_number: TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH,
+            }),
+        ))
+    }
+
+    pub fn create_for_testing_mock(config: BlockDAGConfigMock) -> anyhow::Result<Self> {
+        let dag_storage =
+            FlexiDagStorage::create_from_path(temp_dir(), FlexiDagStorageConfig::default())?;
+        Ok(BlockDAG::new_with_type(
+            8,
+            dag_storage,
+            BlockDAGType::BlockDAGTestMock(config),
+        ))
+    }
+
+    pub fn new_by_config(db_path: &Path) -> anyhow::Result<Self> {
+        let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default());
+        let db = FlexiDagStorage::create_from_path(db_path, config)?;
+        let dag = Self::new(8, db);
+        Ok(dag)
+    }
+
+    pub fn block_dag_config(&self) -> BlockDAGType {
+        self.dag_config.clone()
+    }
+
+    pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result<bool> {
+        Ok(self.storage.header_store.has(hash)?)
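+        // Descriptive note: the header store is the last thing `commit` writes, so this
+        // effectively answers "has this block been fully committed to the dag", which is
+        // how BlockChain::has_dag_block uses it after matching the dag genesis.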
+ } + + pub fn check_ancestor_of(&self, ancestor: Hash, descendant: Vec) -> anyhow::Result { + self.ghostdag_manager + .check_ancestor_of(ancestor, descendant) + } + + pub fn init_with_genesis(&mut self, genesis: BlockHeader) -> anyhow::Result { + let genesis_id = genesis.id(); + let origin = genesis.parent_hash(); + + let real_origin = Hash::sha3_256_of(&[origin, genesis_id].encode()?); + + if self.storage.relations_store.has(real_origin)? { + return Ok(real_origin); + } + inquirer::init(&mut self.storage.reachability_store.clone(), real_origin)?; + + self.storage + .relations_store + .insert(real_origin, BlockHashes::new(vec![]))?; + // self.storage + // .relations_store + // .insert(origin, BlockHashes::new(vec![]))?; + self.commit_genesis(genesis, real_origin)?; + self.save_dag_state( + genesis_id, + DagState { + tips: vec![genesis_id], + }, + )?; + Ok(real_origin) + } + pub fn ghostdata(&self, parents: &[HashValue]) -> Result { + self.ghostdag_manager.ghostdag(parents) + } + + pub fn ghostdata_by_hash(&self, hash: HashValue) -> anyhow::Result>> { + match self.storage.ghost_dag_store.get_data(hash) { + Result::Ok(value) => Ok(Some(value)), + Err(StoreError::KeyNotFound(_)) => Ok(None), + Err(e) => Err(e.into()), + } + } + + pub fn set_reindex_root(&mut self, hash: HashValue) -> anyhow::Result<()> { + self.storage.reachability_store.set_reindex_root(hash)?; + Ok(()) + } + + fn commit_genesis(&mut self, genesis: BlockHeader, origin: HashValue) -> anyhow::Result<()> { + self.commit_inner(genesis, origin, true) + } + + pub fn commit(&mut self, header: BlockHeader, origin: HashValue) -> anyhow::Result<()> { + self.commit_inner(header, origin, false) + } + + pub fn commit_inner(&mut self, header: BlockHeader, origin: HashValue, is_dag_genesis: bool) -> anyhow::Result<()> { + // Generate ghostdag data + let parents = header.parents(); + let ghostdata = match self.ghostdata_by_hash(header.id())? 
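+    /// Commit a header into the DAG: compute (or reuse) its ghostdag data,
+    /// persist that data, extend the reachability index (tolerating keys
+    /// that were already processed), then store the parent relations and
+    /// finally the header itself.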
+    pub fn commit_inner(&mut self, header: BlockHeader, origin: HashValue, is_dag_genesis: bool) -> anyhow::Result<()> {
+        // Generate ghostdag data
+        let parents = header.parents();
+        let ghostdata = match self.ghostdata_by_hash(header.id())? {
+            None => {
+                if is_dag_genesis {
+                    Arc::new(self.ghostdag_manager.genesis_ghostdag_data(&header))
+                } else {
+                    let ghostdata = self.ghostdag_manager.ghostdag(&parents)?;
+                    Arc::new(ghostdata)
+                }
+            }
+            Some(ghostdata) => ghostdata,
+        };
+        // Store ghostdata
+        process_key_already_error(
+            self.storage
+                .ghost_dag_store
+                .insert(header.id(), ghostdata.clone()),
+        )?;
+
+        // Update reachability store
+        let mut reachability_store = self.storage.reachability_store.clone();
+        let mut merge_set = ghostdata
+            .unordered_mergeset_without_selected_parent()
+            .filter(|hash| self.storage.reachability_store.has(*hash).unwrap());
+        match inquirer::add_block(
+            &mut reachability_store,
+            header.id(),
+            ghostdata.selected_parent,
+            &mut merge_set,
+        ) {
+            Result::Ok(_) => (),
+            Err(reachability::ReachabilityError::DataInconsistency) => {
+                let _future_covering_set =
+                    reachability_store.get_future_covering_set(header.id())?;
+                info!(
+                    "the key {:?} was already processed, original error message: {:?}",
+                    header.id(),
+                    reachability::ReachabilityError::DataInconsistency
+                );
+            }
+            Err(reachability::ReachabilityError::StoreError(StoreError::KeyNotFound(msg))) => {
+                if msg == *REINDEX_ROOT_KEY.to_string() {
+                    info!(
+                        "the key {:?} was already processed, original error message: {:?}",
+                        header.id(),
+                        reachability::ReachabilityError::StoreError(StoreError::KeyNotFound(
+                            REINDEX_ROOT_KEY.to_string()
+                        ))
+                    );
+                    info!("now set the reindex key to origin: {:?}", origin);
+                    // self.storage.reachability_store.set_reindex_root(origin)?;
+                    self.set_reindex_root(origin)?;
+                    bail!(
+                        "failed to add a block when committing, e: {:?}",
+                        reachability::ReachabilityError::StoreError(StoreError::KeyNotFound(msg))
+                    );
+                } else {
+                    bail!(
+                        "failed to add a block when committing, e: {:?}",
+                        reachability::ReachabilityError::StoreError(StoreError::KeyNotFound(msg))
+                    );
+                }
+            }
+            Err(e) => {
+                bail!("failed to add a block when committing, e: {:?}", e);
+            }
+        }
+
+        // store relations
+        if is_dag_genesis {
+            let origin = header.parent_hash();
+            let real_origin = Hash::sha3_256_of(&[origin, header.id()].encode()?);
+            process_key_already_error(
+                self.storage
+                    .relations_store
+                    .insert(header.id(), BlockHashes::new(vec![real_origin])),
+            )?;
+        } else {
+            process_key_already_error(
+                self.storage
+                    .relations_store
+                    .insert(header.id(), BlockHashes::new(parents)),
+            )?;
+        }
+        // Store header store
+        process_key_already_error(self.storage.header_store.insert(
+            header.id(),
+            Arc::new(header),
+            0,
+        ))?;
+        Ok(())
+    }
+
+    pub fn get_parents(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> {
+        match self.storage.relations_store.get_parents(hash) {
+            anyhow::Result::Ok(parents) => anyhow::Result::Ok((*parents).clone()),
+            Err(error) => {
+                bail!("failed to get parents by hash: {}", error);
+            }
+        }
+    }
+
+    pub fn get_children(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> {
+        match self.storage.relations_store.get_children(hash) {
+            anyhow::Result::Ok(children) => anyhow::Result::Ok((*children).clone()),
+            Err(error) => {
+                bail!("failed to get children by hash: {}", error);
+            }
+        }
+    }
+
+    pub fn get_dag_state(&self, hash: Hash) -> anyhow::Result<DagState> {
+        Ok(self.storage.state_store.get_state(hash)?)
+    }
+
+    pub fn save_dag_state(&self, hash: Hash, state: DagState) -> anyhow::Result<()> {
+        self.storage.state_store.insert(hash, state)?;
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::consensusdb::prelude::FlexiDagStorageConfig;
+    use starcoin_config::RocksdbConfig;
+    use starcoin_types::block::{
+        BlockHeader, BlockHeaderBuilder, TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG,
+    };
+    use std::{env, fs};
+
+    fn build_block_dag(k: KType) -> BlockDAG {
+        let db_path = env::temp_dir().join("smolstc");
+        println!("db path:{}", db_path.to_string_lossy());
+        if db_path
+            .as_path()
+            .try_exists()
+            .unwrap_or_else(|_| panic!("Failed to check {db_path:?}"))
+        {
+            fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory");
+        }
+        let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default());
+        let db = FlexiDagStorage::create_from_path(db_path, config)
+            .expect("Failed to create flexidag storage");
+        BlockDAG::new(k, db)
+    }
+
+    #[test]
+    fn test_dag_0() {
+        let mut dag = BlockDAG::create_for_testing().unwrap();
+        let genesis = BlockHeader::dag_genesis_random(TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)
+            .as_builder()
+            .with_difficulty(0.into())
+            .build();
+
+        let mut parents_hash = vec![genesis.id()];
+        let origin = dag.init_with_genesis(genesis).unwrap();
+
+        for _ in 0..10 {
+            let header_builder = BlockHeaderBuilder::random();
+            let header = header_builder
+                .with_parents_hash(Some(parents_hash.clone()))
+                .build();
+            parents_hash = vec![header.id()];
+            dag.commit(header.to_owned(), origin).unwrap();
+            let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap();
+            println!("{:?},{:?}", header, ghostdata);
+        }
+    }
+
+    #[test]
+    fn test_dag_1() {
+        let genesis = BlockHeader::dag_genesis_random(TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)
+            .as_builder()
+            .with_difficulty(0.into())
+            .build();
+        let block1 = BlockHeaderBuilder::random()
+            .with_difficulty(1.into())
+            .with_parents_hash(Some(vec![genesis.id()]))
+            .build();
+        let block2 = BlockHeaderBuilder::random()
+            .with_difficulty(2.into())
+            .with_parents_hash(Some(vec![genesis.id()]))
+            .build();
+        let block3_1 = BlockHeaderBuilder::random()
+            .with_difficulty(1.into())
+            .with_parents_hash(Some(vec![genesis.id()]))
+            .build();
+        let block3 = BlockHeaderBuilder::random()
+            .with_difficulty(3.into())
+            .with_parents_hash(Some(vec![block3_1.id()]))
+            .build();
+        let block4 = BlockHeaderBuilder::random()
+            .with_difficulty(4.into())
+            .with_parents_hash(Some(vec![block1.id(), block2.id()]))
+            .build();
+        let block5 = BlockHeaderBuilder::random()
+            .with_difficulty(4.into())
+            .with_parents_hash(Some(vec![block2.id(), block3.id()]))
+            .build();
+        let block6 = BlockHeaderBuilder::random()
+            .with_difficulty(5.into())
+            .with_parents_hash(Some(vec![block4.id(), block5.id()]))
+            .build();
+        let mut latest_id = block6.id();
+        let genesis_id = genesis.id();
+        let mut dag = build_block_dag(3);
+        let expect_selected_parented = vec![block5.id(), block3.id(), block3_1.id(), genesis_id];
+        let origin = dag.init_with_genesis(genesis).unwrap();
+
+        dag.commit(block1, origin).unwrap();
+        dag.commit(block2, origin).unwrap();
+        dag.commit(block3_1, origin).unwrap();
+        dag.commit(block3, origin).unwrap();
+        dag.commit(block4, origin).unwrap();
+        dag.commit(block5, origin).unwrap();
+        dag.commit(block6, origin).unwrap();
+        let mut count = 0;
+        while latest_id != genesis_id && count < 4 {
+            let ghostdata = dag.ghostdata_by_hash(latest_id).unwrap().unwrap();
+            latest_id = ghostdata.selected_parent;
+            assert_eq!(expect_selected_parented[count], latest_id);
+            count += 1;
+        }
+    }
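+
+    // `commit` is expected to be idempotent under concurrent replays of the
+    // same block: duplicate-key errors are absorbed by
+    // `process_key_already_error`, so the spawned tasks below must leave
+    // exactly one `block3` child under `block1`.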
+    #[tokio::test]
+    async fn test_with_spawn() {
+        use starcoin_types::block::{BlockHeader, BlockHeaderBuilder};
+        let genesis = BlockHeader::dag_genesis_random(TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG)
+            .as_builder()
+            .with_difficulty(0.into())
+            .build();
+        let block1 = BlockHeaderBuilder::random()
+            .with_difficulty(1.into())
+            .with_parents_hash(Some(vec![genesis.id()]))
+            .build();
+        let block2 = BlockHeaderBuilder::random()
+            .with_difficulty(2.into())
+            .with_parents_hash(Some(vec![genesis.id()]))
+            .build();
+        let mut dag = BlockDAG::create_for_testing().unwrap();
+        let real_origin = dag.init_with_genesis(genesis).unwrap();
+        dag.commit(block1.clone(), real_origin).unwrap();
+        dag.commit(block2.clone(), real_origin).unwrap();
+        let block3 = BlockHeaderBuilder::random()
+            .with_difficulty(3.into())
+            .with_parents_hash(Some(vec![block1.id(), block2.id()]))
+            .build();
+        let mut handles = vec![];
+        for _i in 1..100 {
+            let mut dag_clone = dag.clone();
+            let block_clone = block3.clone();
+            let handle = tokio::task::spawn_blocking(move || {
+                let _ = dag_clone.commit(block_clone, real_origin);
+            });
+            handles.push(handle);
+        }
+        for handle in handles {
+            handle.await.unwrap();
+        }
+        let mut child = dag.get_children(block1.id()).unwrap();
+        assert_eq!(child.pop().unwrap(), block3.id());
+        assert_eq!(child.len(), 0);
+    }
+}
diff --git a/flexidag/dag/src/consensusdb/access.rs b/flexidag/dag/src/consensusdb/access.rs
new file mode 100644
index 0000000000..9d6a8ceedf
--- /dev/null
+++ b/flexidag/dag/src/consensusdb/access.rs
@@ -0,0 +1,199 @@
+use super::{cache::DagCache, db::DBStorage, error::StoreError};
+
+use super::prelude::DbWriter;
+use super::schema::{KeyCodec, Schema, ValueCodec};
+use itertools::Itertools;
+use rocksdb::{Direction, IteratorMode, ReadOptions};
+use starcoin_storage::storage::RawDBStorage;
+use std::{
+    collections::hash_map::RandomState, error::Error, hash::BuildHasher, marker::PhantomData,
+    sync::Arc,
+};
+
+/// A concurrent DB store access with typed caching.
+#[derive(Clone)]
+pub struct CachedDbAccess<S: Schema, R = RandomState> {
+    db: Arc<DBStorage>,
+
+    // Cache
+    cache: DagCache<S::Key, S::Value>,
+
+    _phantom: PhantomData<R>,
+}
+
+impl<S: Schema, R> CachedDbAccess<S, R>
+where
+    R: BuildHasher + Default,
+{
+    pub fn new(db: Arc<DBStorage>, cache_size: usize) -> Self {
+        Self {
+            db,
+            cache: DagCache::new_with_capacity(cache_size),
+            _phantom: Default::default(),
+        }
+    }
+
+    pub fn read_from_cache(&self, key: S::Key) -> Option<S::Value> {
+        self.cache.get(&key)
+    }
+
+    pub fn has(&self, key: S::Key) -> Result<bool, StoreError> {
+        Ok(self.cache.contains_key(&key)
+            || self
+                .db
+                .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap())
+                .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))?
+                .is_some())
+    }
+
+    pub fn read(&self, key: S::Key) -> Result<S::Value, StoreError> {
+        if let Some(data) = self.cache.get(&key) {
+            Ok(data)
+        } else if let Some(slice) = self
+            .db
+            .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap())
+            .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))?
+        {
+            let data = S::Value::decode_value(slice.as_ref())
+                .map_err(|o| StoreError::DecodeError(o.to_string()))?;
+            self.cache.insert(key, data.clone());
+            Ok(data)
+        } else {
+            Err(StoreError::KeyNotFound(format!("{:?}", key)))
+        }
+    }
+
+    pub fn iterator(
+        &self,
+    ) -> Result<impl Iterator<Item = Result<(Box<[u8]>, S::Value), Box<dyn Error>>> + '_, StoreError>
+    {
+        let db_iterator = self
+            .db
+            .raw_iterator_cf_opt(
+                S::COLUMN_FAMILY,
+                IteratorMode::Start,
+                ReadOptions::default(),
+            )
+            .map_err(|e| StoreError::CFNotExist(e.to_string()))?;
+
+        Ok(db_iterator.map(|iter_result| match iter_result {
+            Ok((key, data_bytes)) => match S::Value::decode_value(&data_bytes) {
+                Ok(data) => Ok((key, data)),
+                Err(e) => Err(e.into()),
+            },
+            Err(e) => Err(e.into()),
+        }))
+    }
+
+    pub fn write(
+        &self,
+        mut writer: impl DbWriter,
+        key: S::Key,
+        data: S::Value,
+    ) -> Result<(), StoreError> {
+        writer.put::<S>(&key, &data)?;
+        self.cache.insert(key, data);
+        Ok(())
+    }
+
+    pub fn write_many(
+        &self,
+        mut writer: impl DbWriter,
+        iter: &mut (impl Iterator<Item = (S::Key, S::Value)> + Clone),
+    ) -> Result<(), StoreError> {
+        for (key, data) in iter {
+            writer.put::<S>(&key, &data)?;
+            self.cache.insert(key, data);
+        }
+        Ok(())
+    }
+
+    /// Write directly from an iterator and do not cache any data. NOTE: this action also clears the cache
+    pub fn write_many_without_cache(
+        &self,
+        mut writer: impl DbWriter,
+        iter: &mut impl Iterator<Item = (S::Key, S::Value)>,
+    ) -> Result<(), StoreError> {
+        for (key, data) in iter {
+            writer.put::<S>(&key, &data)?;
+        }
+        // The cache must be cleared in order to avoid invalidated entries
+        self.cache.remove_all();
+        Ok(())
+    }
+
+    pub fn delete(&self, mut writer: impl DbWriter, key: S::Key) -> Result<(), StoreError> {
+        self.cache.remove(&key);
+        writer.delete::<S>(&key)?;
+        Ok(())
+    }
+
+    pub fn delete_many(
+        &self,
+        mut writer: impl DbWriter,
+        key_iter: &mut (impl Iterator<Item = S::Key> + Clone),
+    ) -> Result<(), StoreError> {
+        let key_iter_clone = key_iter.clone();
+        self.cache.remove_many(key_iter);
+        for key in key_iter_clone {
+            writer.delete::<S>(&key)?;
+        }
+        Ok(())
+    }
+
+    pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> {
+        self.cache.remove_all();
+        let keys = self
+            .db
+            .raw_iterator_cf_opt(
+                S::COLUMN_FAMILY,
+                IteratorMode::Start,
+                ReadOptions::default(),
+            )
+            .map_err(|e| StoreError::CFNotExist(e.to_string()))?
+            .map(|iter_result| match iter_result {
+                Ok((key, _)) => Ok::<_, rocksdb::Error>(key),
+                Err(e) => Err(e),
+            })
+            .collect_vec();
+        for key in keys {
+            writer.delete::<S>(&S::Key::decode_key(&key?)?)?;
+        }
+        Ok(())
+    }
+
+    /// A dynamic iterator that can iterate through a specific prefix, and from a certain start point.
+    //TODO: loop and chain iterators for multi-prefix iterator.
+    pub fn seek_iterator(
+        &self,
+        seek_from: Option<S::Key>, // iter whole range if None
+        limit: usize,              // amount to take.
+        skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve).
+    ) -> Result<impl Iterator<Item = Result<(Box<[u8]>, S::Value), Box<dyn Error>>> + '_, StoreError>
+    {
+        let read_opts = ReadOptions::default();
+        let mut db_iterator = match seek_from {
+            Some(seek_key) => self.db.raw_iterator_cf_opt(
+                S::COLUMN_FAMILY,
+                IteratorMode::From(seek_key.encode_key()?.as_slice(), Direction::Forward),
+                read_opts,
+            ),
+            None => self
+                .db
+                .raw_iterator_cf_opt(S::COLUMN_FAMILY, IteratorMode::Start, read_opts),
+        }
+        .map_err(|e| StoreError::CFNotExist(e.to_string()))?;
+
+        if skip_first {
+            db_iterator.next();
+        }
+
+        Ok(db_iterator.take(limit).map(move |item| match item {
+            Ok((key_bytes, value_bytes)) => match S::Value::decode_value(value_bytes.as_ref()) {
+                Ok(value) => Ok((key_bytes, value)),
+                Err(err) => Err(err.into()),
+            },
+            Err(err) => Err(err.into()),
+        }))
+    }
+}
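
A minimal paging sketch over `seek_iterator`, assuming a hypothetical `MySchema` generated by `define_schema!` whose key type also implements `Clone`; error handling is simplified and decode failures are skipped:

    // Hypothetical: `MySchema` is any type generated by `define_schema!`.
    fn dump_all(access: &CachedDbAccess<MySchema>) -> Result<(), StoreError> {
        let mut seek_from: Option<<MySchema as Schema>::Key> = None;
        loop {
            // Page through the column family 100 entries at a time, skipping
            // the boundary key on every page after the first.
            let page: Vec<_> = access
                .seek_iterator(seek_from.clone(), 100, seek_from.is_some())?
                .filter_map(|item| item.ok())
                .collect();
            let Some((last_key, _)) = page.last() else { break };
            seek_from = Some(<MySchema as Schema>::Key::decode_key(last_key)?);
        }
        Ok(())
    }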
diff --git a/flexidag/dag/src/consensusdb/cache.rs b/flexidag/dag/src/consensusdb/cache.rs
new file mode 100644
index 0000000000..51d3dda9b3
--- /dev/null
+++ b/flexidag/dag/src/consensusdb/cache.rs
@@ -0,0 +1,44 @@
+use core::hash::Hash;
+use starcoin_storage::cache_storage::GCacheStorage;
+use std::sync::Arc;
+
+#[derive(Clone)]
+pub struct DagCache<K: Hash + Eq + Default, V: Default + Clone> {
+    cache: Arc<GCacheStorage<K, V>>,
+}
+
+impl<K, V> DagCache<K, V>
+where
+    K: Hash + Eq + Default,
+    V: Default + Clone,
+{
+    pub(crate) fn new_with_capacity(size: usize) -> Self {
+        Self {
+            cache: Arc::new(GCacheStorage::new_with_capacity(size, None)),
+        }
+    }
+
+    pub(crate) fn get(&self, key: &K) -> Option<V> {
+        self.cache.get_inner(key)
+    }
+
+    pub(crate) fn contains_key(&self, key: &K) -> bool {
+        self.get(key).is_some()
+    }
+
+    pub(crate) fn insert(&self, key: K, data: V) {
+        self.cache.put_inner(key, data);
+    }
+
+    pub(crate) fn remove(&self, key: &K) {
+        self.cache.remove_inner(key);
+    }
+
+    pub(crate) fn remove_many(&self, key_iter: &mut impl Iterator<Item = K>) {
+        key_iter.for_each(|k| self.remove(&k));
+    }
+
+    pub(crate) fn remove_all(&self) {
+        self.cache.remove_all();
+    }
+}
diff --git a/flexidag/dag/src/consensusdb/consenses_state.rs b/flexidag/dag/src/consensusdb/consenses_state.rs
new file mode 100644
index 0000000000..29f77501a4
--- /dev/null
+++ b/flexidag/dag/src/consensusdb/consenses_state.rs
@@ -0,0 +1,86 @@
+use super::schema::{KeyCodec, ValueCodec};
+use super::{db::DBStorage, error::StoreError, prelude::CachedDbAccess, writer::DirectDbWriter};
+use crate::define_schema;
+use schemars::{self, JsonSchema};
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use std::sync::Arc;
+
+#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, Default)]
+pub struct DagState {
+    pub tips: Vec<Hash>,
+}
+
+pub(crate) const DAG_STATE_STORE_CF: &str = "dag-state-store";
+define_schema!(DagStateData, Hash, DagState, DAG_STATE_STORE_CF);
+
+impl KeyCodec<DagStateData> for Hash {
+    fn encode_key(&self) -> Result<Vec<u8>, StoreError> {
+        Ok(self.to_vec())
+    }
+
+    fn decode_key(data: &[u8]) -> Result<Self, StoreError> {
+        Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string()))
+    }
+}
+impl ValueCodec<DagStateData> for DagState {
+    fn encode_value(&self) -> Result<Vec<u8>, StoreError> {
+        bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string()))
+    }
+
+    fn decode_value(data: &[u8]) -> Result<Self, StoreError> {
+        bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string()))
+    }
+}
+
+pub trait DagStateReader {
+    fn get_state(&self, dag_genesis: Hash) -> Result<DagState, StoreError>;
+}
+
+pub trait DagStateStore: DagStateReader {
+    // This is append only
+    fn insert(&self, dag_genesis: Hash, state: DagState) -> Result<(), StoreError>;
+}
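
The state store keys each `DagState` by its DAG genesis hash and round-trips values through BCS; a minimal codec sanity check, using only items defined in this file, might look like:

    #[test]
    fn dag_state_codec_round_trip() {
        let state = DagState { tips: vec![Hash::zero()] };
        let bytes = state.encode_value().unwrap();
        // decode_value must reproduce the original tips exactly.
        assert_eq!(DagState::decode_value(&bytes).unwrap(), state);
    }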
+/// A DB + cache implementation of the `DagStateStore` trait, with concurrency support.
+#[derive(Clone)]
+pub struct DbDagStateStore {
+    db: Arc<DBStorage>,
+    dag_state_access: CachedDbAccess<DagStateData>,
+}
+
+impl DbDagStateStore {
+    pub fn new(db: Arc<DBStorage>, cache_size: usize) -> Self {
+        Self {
+            db: Arc::clone(&db),
+            dag_state_access: CachedDbAccess::new(db.clone(), cache_size),
+        }
+    }
+}
+
+impl DagStateReader for DbDagStateStore {
+    fn get_state(&self, dag_genesis: Hash) -> Result<DagState, StoreError> {
+        let result = self.dag_state_access.read(dag_genesis)?;
+        Ok(result)
+    }
+}
+
+impl DagStateStore for DbDagStateStore {
+    fn insert(&self, dag_genesis: Hash, state: DagState) -> Result<(), StoreError> {
+        self.dag_state_access
+            .write(DirectDbWriter::new(&self.db), dag_genesis, state)?;
+        Ok(())
+    }
+}
+
+#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, JsonSchema)]
+pub struct DagStateView {
+    pub dag_genesis: Hash,
+    pub tips: Vec<Hash>,
+}
+
+impl DagStateView {
+    pub fn into_state(self) -> DagState {
+        DagState { tips: self.tips }
+    }
+}
diff --git a/flexidag/dag/src/consensusdb/consensus_ghostdag.rs b/flexidag/dag/src/consensusdb/consensus_ghostdag.rs
new file mode 100644
index 0000000000..abf781e175
--- /dev/null
+++ b/flexidag/dag/src/consensusdb/consensus_ghostdag.rs
@@ -0,0 +1,516 @@
+use super::schema::{KeyCodec, ValueCodec};
+use super::{
+    db::DBStorage,
+    error::StoreError,
+    prelude::{CachedDbAccess, DirectDbWriter},
+    writer::BatchDbWriter,
+};
+use crate::define_schema;
+use starcoin_types::blockhash::{
+    BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap,
+};
+
+use crate::types::{
+    ghostdata::{CompactGhostdagData, GhostdagData},
+    ordering::SortableBlock,
+};
+use itertools::{
+    EitherOrBoth::{Both, Left, Right},
+    Itertools,
+};
+use rocksdb::WriteBatch;
+use starcoin_crypto::HashValue as Hash;
+use std::{cell::RefCell, cmp, iter::once, sync::Arc};
+
+pub trait GhostdagStoreReader {
+    fn get_blue_score(&self, hash: Hash) -> Result<u64, StoreError>;
+    fn get_blue_work(&self, hash: Hash) -> Result<BlueWorkType, StoreError>;
+    fn get_selected_parent(&self, hash: Hash) -> Result<Hash, StoreError>;
+    fn get_mergeset_blues(&self, hash: Hash) -> Result<BlockHashes, StoreError>;
+    fn get_mergeset_reds(&self, hash: Hash) -> Result<BlockHashes, StoreError>;
+    fn get_blues_anticone_sizes(&self, hash: Hash) -> Result<HashKTypeMap, StoreError>;
+
+    /// Returns full block data for the requested hash
+    fn get_data(&self, hash: Hash) -> Result<Arc<GhostdagData>, StoreError>;
+
+    fn get_compact_data(&self, hash: Hash) -> Result<CompactGhostdagData, StoreError>;
+
+    /// Check if the store contains data for the requested hash
+    fn has(&self, hash: Hash) -> Result<bool, StoreError>;
+}
+
+pub trait GhostdagStore: GhostdagStoreReader {
+    /// Insert GHOSTDAG data for block `hash` into the store. Note that GHOSTDAG data
+    /// is added once and never modified, so no need for specific setters for each element.
+    /// Additionally, this means writes are semantically "append-only", which is why
+    /// we can keep the `insert` method non-mutable on self. See "Parallel Processing.md" for an overview.
+ fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError>; +} + +pub struct GhostDagDataWrapper(GhostdagData); + +impl From for GhostDagDataWrapper { + fn from(value: GhostdagData) -> Self { + Self(value) + } +} + +impl GhostDagDataWrapper { + /// Returns an iterator to the mergeset in ascending blue work order (tie-breaking by hash) + pub fn ascending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (a, b) { + (Ok(a), Ok(b)) => a.cmp(b), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // remove both Err nodes + }, + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in descending blue work order (tie-breaking by hash) + pub fn descending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .rev() // Reverse since blues and reds are stored with ascending blue work order + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .rev() // Reverse + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (b, a) { + (Ok(b), Ok(a)) => b.cmp(a), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // select both Err nodes + }, // Reverse + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in topological consensus order -- starting with the selected parent, + /// and adding the mergeset in increasing blue work order. Note that this is a topological order even though + /// the selected parent has highest blue work by def -- since the mergeset is in its anticone. 
+ pub fn consensus_ordered_mergeset<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + once(Ok(self.0.selected_parent)).chain( + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)), + ) + } + + /// Returns an iterator to the mergeset in topological consensus order without the selected parent + pub fn consensus_ordered_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)) + } +} + +pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; +pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; + +define_schema!(GhostDag, Hash, Arc, GHOST_DAG_STORE_CF); +define_schema!( + CompactGhostDag, + Hash, + CompactGhostdagData, + COMPACT_GHOST_DAG_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactGhostdagData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbGhostdagStore { + db: Arc, + level: BlockLevel, + access: CachedDbAccess, + compact_access: CachedDbAccess, +} + +impl DbGhostdagStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: usize) -> Self { + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_size), + compact_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + data: &Arc, + ) -> Result<(), StoreError> { + if self.access.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(BatchDbWriter::new(batch), hash, data.clone())?; + self.compact_access.write( + BatchDbWriter::new(batch), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +impl GhostdagStoreReader for DbGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.blue_score) + } + + fn get_blue_work(&self, hash: Hash) -> Result { + match self.access.read(hash) { + Ok(ghost_data) => Ok(ghost_data.blue_work), + Err(StoreError::KeyNotFound(_)) => Err(StoreError::HashValueNotFound(hash)), + Err(e) => Err(e), + } + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.selected_parent) + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_blues)) + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_reds)) + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.blues_anticone_sizes)) + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + self.access.read(hash) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + self.compact_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } +} + +impl GhostdagStore for DbGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(DirectDbWriter::new(&self.db), hash, data.clone())?; + if self.compact_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +/// An in-memory implementation of `GhostdagStore` trait to be used for tests. +/// Uses `RefCell` for interior mutability in order to workaround `insert` +/// being non-mutable. +pub struct MemoryGhostdagStore { + blue_score_map: RefCell>, + blue_work_map: RefCell>, + selected_parent_map: RefCell>, + mergeset_blues_map: RefCell>, + mergeset_reds_map: RefCell>, + blues_anticone_sizes_map: RefCell>, +} + +impl MemoryGhostdagStore { + pub fn new() -> Self { + Self { + blue_score_map: RefCell::new(BlockHashMap::new()), + blue_work_map: RefCell::new(BlockHashMap::new()), + selected_parent_map: RefCell::new(BlockHashMap::new()), + mergeset_blues_map: RefCell::new(BlockHashMap::new()), + mergeset_reds_map: RefCell::new(BlockHashMap::new()), + blues_anticone_sizes_map: RefCell::new(BlockHashMap::new()), + } + } +} + +impl Default for MemoryGhostdagStore { + fn default() -> Self { + Self::new() + } +} + +impl GhostdagStore for MemoryGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.blue_score_map + .borrow_mut() + .insert(hash, data.blue_score); + self.blue_work_map.borrow_mut().insert(hash, data.blue_work); + self.selected_parent_map + .borrow_mut() + .insert(hash, data.selected_parent); + self.mergeset_blues_map + .borrow_mut() + .insert(hash, data.mergeset_blues.clone()); + self.mergeset_reds_map + .borrow_mut() + .insert(hash, data.mergeset_reds.clone()); + self.blues_anticone_sizes_map + .borrow_mut() + .insert(hash, data.blues_anticone_sizes.clone()); + Ok(()) + } +} + +impl GhostdagStoreReader for MemoryGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + match self.blue_score_map.borrow().get(&hash) { + Some(blue_score) => Ok(*blue_score), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blue_work(&self, hash: Hash) -> Result { + match self.blue_work_map.borrow().get(&hash) { + Some(blue_work) => Ok(*blue_work), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + match self.selected_parent_map.borrow().get(&hash) { + Some(selected_parent) => Ok(*selected_parent), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + match self.mergeset_blues_map.borrow().get(&hash) { + Some(mergeset_blues) => Ok(BlockHashes::clone(mergeset_blues)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + match self.mergeset_reds_map.borrow().get(&hash) { + Some(mergeset_reds) => Ok(BlockHashes::clone(mergeset_reds)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + match self.blues_anticone_sizes_map.borrow().get(&hash) { + Some(sizes) => Ok(HashKTypeMap::clone(sizes)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + if !self.has(hash)? 
{ + return Err(StoreError::KeyNotFound(hash.to_string())); + } + Ok(Arc::new(GhostdagData::new( + self.blue_score_map.borrow()[&hash], + self.blue_work_map.borrow()[&hash], + self.selected_parent_map.borrow()[&hash], + self.mergeset_blues_map.borrow()[&hash].clone(), + self.mergeset_reds_map.borrow()[&hash].clone(), + self.blues_anticone_sizes_map.borrow()[&hash].clone(), + ))) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.to_compact()) + } + + fn has(&self, hash: Hash) -> Result { + Ok(self.blue_score_map.borrow().contains_key(&hash)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use starcoin_types::blockhash::BlockHashSet; + use std::iter::once; + + #[test] + fn test_mergeset_iterators() { + let store = MemoryGhostdagStore::new(); + + let factory = |w: u64| { + Arc::new(GhostdagData { + blue_score: Default::default(), + blue_work: w.into(), + selected_parent: Default::default(), + mergeset_blues: Default::default(), + mergeset_reds: Default::default(), + blues_anticone_sizes: Default::default(), + }) + }; + + // Blues + store.insert(1.into(), factory(2)).unwrap(); + store.insert(2.into(), factory(7)).unwrap(); + store.insert(3.into(), factory(11)).unwrap(); + + // Reds + store.insert(4.into(), factory(4)).unwrap(); + store.insert(5.into(), factory(9)).unwrap(); + store.insert(6.into(), factory(11)).unwrap(); // Tie-breaking case + + let mut data = GhostdagData::new_with_selected_parent(1.into(), 5); + data.add_blue(2.into(), Default::default(), &Default::default()); + data.add_blue(3.into(), Default::default(), &Default::default()); + + data.add_red(4.into()); + data.add_red(5.into()); + data.add_red(6.into()); + + let wrapper: GhostDagDataWrapper = data.clone().into(); + + let mut expected: Vec = vec![4.into(), 2.into(), 5.into(), 3.into(), 6.into()]; + assert_eq!( + expected, + wrapper + .ascending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + itertools::assert_equal( + once(1.into()).chain(expected.iter().cloned()), + wrapper + .consensus_ordered_mergeset(&store) + .filter_map(|b| b.ok()), + ); + + expected.reverse(); + assert_eq!( + expected, + wrapper + .descending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + // Use sets since the below functions have no order guarantee + let expected = BlockHashSet::from_iter([4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset_without_selected_parent() + .collect::() + ); + + let expected = + BlockHashSet::from_iter([1.into(), 4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset().collect::() + ); + } +} diff --git a/flexidag/dag/src/consensusdb/consensus_header.rs b/flexidag/dag/src/consensusdb/consensus_header.rs new file mode 100644 index 0000000000..11b842be47 --- /dev/null +++ b/flexidag/dag/src/consensusdb/consensus_header.rs @@ -0,0 +1,217 @@ +use super::schema::{KeyCodec, ValueCodec}; +use super::{ + db::DBStorage, + error::{StoreError, StoreResult}, + prelude::CachedDbAccess, + writer::{BatchDbWriter, DirectDbWriter}, +}; +use crate::define_schema; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::block::BlockHeader; +use starcoin_types::{ + blockhash::BlockLevel, + consensus_header::{CompactHeaderData, HeaderWithBlockLevel}, + U256, +}; +use std::sync::Arc; + +pub trait HeaderStoreReader { + fn get_daa_score(&self, hash: Hash) -> 
Result; + fn get_blue_score(&self, hash: Hash) -> Result; + fn get_timestamp(&self, hash: Hash) -> Result; + fn get_difficulty(&self, hash: Hash) -> Result; + fn get_header(&self, hash: Hash) -> Result, StoreError>; + fn get_header_with_block_level(&self, hash: Hash) -> Result; + fn get_compact_header_data(&self, hash: Hash) -> Result; +} + +pub trait HeaderStore: HeaderStoreReader { + // This is append only + fn insert( + &self, + hash: Hash, + header: Arc, + block_level: BlockLevel, + ) -> Result<(), StoreError>; +} + +pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; +pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; + +define_schema!(DagHeader, Hash, HeaderWithBlockLevel, HEADERS_STORE_CF); +define_schema!( + CompactBlockHeader, + Hash, + CompactHeaderData, + COMPACT_HEADER_DATA_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for HeaderWithBlockLevel { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactHeaderData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `HeaderStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbHeadersStore { + db: Arc, + headers_access: CachedDbAccess, + compact_headers_access: CachedDbAccess, +} + +impl DbHeadersStore { + pub fn new(db: Arc, cache_size: usize) -> Self { + Self { + db: Arc::clone(&db), + headers_access: CachedDbAccess::new(db.clone(), cache_size), + compact_headers_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { + Self::new(Arc::clone(&self.db), cache_size) + } + + pub fn has(&self, hash: Hash) -> StoreResult { + self.headers_access.has(hash) + } + + pub fn get_header(&self, hash: Hash) -> Result { + let result = self.headers_access.read(hash)?; + Ok((*result.header).clone()) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + header: Arc, + block_level: BlockLevel, + ) -> Result<(), StoreError> { + if self.headers_access.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.headers_access.write( + BatchDbWriter::new(batch), + hash, + HeaderWithBlockLevel { + header: header.clone(), + block_level, + }, + )?; + self.compact_headers_access.write( + BatchDbWriter::new(batch), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + Ok(()) + } +} + +impl HeaderStoreReader for DbHeadersStore { + fn get_daa_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_blue_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_timestamp(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(header_with_block_level.header.timestamp()); + } + Ok(self.compact_headers_access.read(hash)?.timestamp) + } + + fn get_difficulty(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(header_with_block_level.header.difficulty()); + } + Ok(self.compact_headers_access.read(hash)?.difficulty) + } + + fn get_header(&self, hash: Hash) -> Result, StoreError> { + Ok(self.headers_access.read(hash)?.header) + } + + fn get_header_with_block_level(&self, hash: Hash) -> Result { + self.headers_access.read(hash) + } + + fn get_compact_header_data(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(CompactHeaderData { + timestamp: header_with_block_level.header.timestamp(), + difficulty: header_with_block_level.header.difficulty(), + }); + } + self.compact_headers_access.read(hash) + } +} + +impl HeaderStore for DbHeadersStore { + fn insert( + &self, + hash: Hash, + header: Arc, + block_level: u8, + ) -> Result<(), StoreError> { + if self.headers_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_headers_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + self.headers_access.write( + DirectDbWriter::new(&self.db), + hash, + HeaderWithBlockLevel { + header, + block_level, + }, + )?; + Ok(()) + } +} diff --git a/flexidag/dag/src/consensusdb/consensus_reachability.rs b/flexidag/dag/src/consensusdb/consensus_reachability.rs new file mode 100644 index 0000000000..d593e3921f --- /dev/null +++ b/flexidag/dag/src/consensusdb/consensus_reachability.rs @@ -0,0 +1,549 @@ +use super::{ + db::DBStorage, + prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, +}; +use starcoin_crypto::HashValue as Hash; +use starcoin_storage::storage::RawDBStorage; + +use crate::{ + consensusdb::schema::{KeyCodec, ValueCodec}, + define_schema, + types::{interval::Interval, reachability::ReachabilityData}, +}; +use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; + +use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; +use rocksdb::WriteBatch; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; + +/// Reader API for `ReachabilityStore`. +pub trait ReachabilityStoreReader { + fn has(&self, hash: Hash) -> Result; + fn get_interval(&self, hash: Hash) -> Result; + fn get_parent(&self, hash: Hash) -> Result; + fn get_children(&self, hash: Hash) -> Result; + fn get_future_covering_set(&self, hash: Hash) -> Result; +} + +/// Write API for `ReachabilityStore`. 
All write functions are deliberately `mut` +/// since reachability writes are not append-only and thus need to be guarded. +pub trait ReachabilityStore: ReachabilityStoreReader { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError>; + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError>; + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError>; + fn append_child(&mut self, hash: Hash, child: Hash) -> Result; + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError>; + fn get_height(&self, hash: Hash) -> Result; + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError>; + fn get_reindex_root(&self) -> Result; +} + +pub const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; +pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; +// TODO: explore perf to see if using fixed-length constants for store prefixes is preferable + +define_schema!( + Reachability, + Hash, + Arc, + REACHABILITY_DATA_CF +); +define_schema!(ReachabilityCache, Vec, Hash, REACHABILITY_DATA_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Vec { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Ok(data.to_vec()) + } +} +impl ValueCodec for Hash { + fn encode_value(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_value(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. 
+#[derive(Clone)] +pub struct DbReachabilityStore { + db: Arc, + access: CachedDbAccess, + reindex_root: CachedDbItem, +} + +impl DbReachabilityStore { + pub fn new(db: Arc, cache_size: usize) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + pub fn new_with_alternative_prefix_end(db: Arc, cache_size: usize) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + fn new_with_prefix_end(db: Arc, cache_size: usize) -> Self { + Self { + db: Arc::clone(&db), + access: CachedDbAccess::new(Arc::clone(&db), cache_size), + reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY.as_bytes().to_vec()), + } + } + + pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { + Self::new_with_prefix_end(Arc::clone(&self.db), cache_size) + } +} + +impl ReachabilityStore for DbReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + debug_assert!(!self.access.has(origin)?); + + let data = Arc::new(ReachabilityData::new( + Hash::new(blockhash::NONE), + capacity, + 0, + )); + let mut batch = WriteBatch::default(); + self.access + .write(BatchDbWriter::new(&mut batch), origin, data)?; + self.reindex_root + .write(BatchDbWriter::new(&mut batch), &origin)?; + self.db + .raw_write_batch(batch) + .map_err(|e| StoreError::DBIoError(e.to_string()))?; + + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + let data = Arc::new(ReachabilityData::new(parent, interval, height)); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + Arc::make_mut(&mut data).interval = interval; + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let mut data = self.access.read(hash)?; + let height = data.height; + if data.children.contains(&child) { + return Ok(height); + } + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.children).push(child); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.future_covering_set).insert(insertion_index, fci); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root + .write(DirectDbWriter::new(&self.db), &root) + } + + fn get_reindex_root(&self) -> Result { + self.reindex_root.read() + } +} + +impl ReachabilityStoreReader for DbReachabilityStore { + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } + + fn get_interval(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.interval) + } + + fn get_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.parent) + } + + fn get_children(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.children)) + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + 
Ok(Arc::clone(&self.access.read(hash)?.future_covering_set)) + } +} + +pub struct StagingReachabilityStore<'a> { + store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>, + staging_writes: BlockHashMap, + staging_reindex_root: Option, +} + +impl<'a> StagingReachabilityStore<'a> { + pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>) -> Self { + Self { + store_read, + staging_writes: BlockHashMap::new(), + staging_reindex_root: None, + } + } + + pub fn commit( + self, + batch: &mut WriteBatch, + ) -> Result, StoreError> { + let mut store_write = RwLockUpgradableReadGuard::upgrade(self.store_read); + for (k, v) in self.staging_writes { + let data = Arc::new(v); + store_write + .access + .write(BatchDbWriter::new(batch), k, data)? + } + if let Some(root) = self.staging_reindex_root { + store_write + .reindex_root + .write(BatchDbWriter::new(batch), &root)?; + } + Ok(store_write) + } +} + +impl ReachabilityStore for StagingReachabilityStore<'_> { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.store_read.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + if let Vacant(e) = self.staging_writes.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + data.interval = interval; + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + data.interval = interval; + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + if let Some(data) = self.staging_writes.get_mut(&hash) { + if data.children.contains(&child) { + return Ok(data.height); + } + Arc::make_mut(&mut data.children).push(child); + return Ok(data.height); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + if data.children.contains(&child) { + return Ok(data.height); + } + let height = data.height; + Arc::make_mut(&mut data.children).push(child); + self.staging_writes.insert(hash, data); + + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.height) + } else { + Ok(self.store_read.access.read(hash)?.height) + } + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.staging_reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + if let Some(root) = self.staging_reindex_root { + Ok(root) + } else { + Ok(self.store_read.get_reindex_root()?) 
+ } + } +} + +impl ReachabilityStoreReader for StagingReachabilityStore<'_> { + fn has(&self, hash: Hash) -> Result { + Ok(self.staging_writes.contains_key(&hash) || self.store_read.access.has(hash)?) + } + + fn get_interval(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.interval) + } else { + Ok(self.store_read.access.read(hash)?.interval) + } + } + + fn get_parent(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.parent) + } else { + Ok(self.store_read.access.read(hash)?.parent) + } + } + + fn get_children(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.children)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.children, + )) + } + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.future_covering_set)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.future_covering_set, + )) + } + } +} + +pub struct MemoryReachabilityStore { + map: BlockHashMap, + reindex_root: Option, +} + +impl Default for MemoryReachabilityStore { + fn default() -> Self { + Self::new() + } +} + +impl MemoryReachabilityStore { + pub fn new() -> Self { + Self { + map: BlockHashMap::new(), + reindex_root: None, + } + } + + fn get_data_mut(&mut self, hash: Hash) -> Result<&mut ReachabilityData, StoreError> { + match self.map.get_mut(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result<&ReachabilityData, StoreError> { + match self.map.get(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } +} + +impl ReachabilityStore for MemoryReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if let Vacant(e) = self.map.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + data.interval = interval; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.children).push(child); + Ok(data.height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + match self.reindex_root { + Some(root) => Ok(root), + None => Err(StoreError::KeyNotFound(REINDEX_ROOT_KEY.to_string())), + } + } +} + +impl ReachabilityStoreReader for MemoryReachabilityStore { + fn has(&self, hash: Hash) -> Result { + Ok(self.map.contains_key(&hash)) + } + + fn 
get_interval(&self, hash: Hash) -> Result<Interval, StoreError> {
+        Ok(self.get_data(hash)?.interval)
+    }
+
+    fn get_parent(&self, hash: Hash) -> Result<Hash, StoreError> {
+        Ok(self.get_data(hash)?.parent)
+    }
+
+    fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        Ok(Arc::clone(&self.get_data(hash)?.children))
+    }
+
+    fn get_future_covering_set(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        Ok(Arc::clone(&self.get_data(hash)?.future_covering_set))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_store_basics() {
+        let mut store: Box<dyn ReachabilityStore> = Box::new(MemoryReachabilityStore::new());
+        let (hash, parent) = (7.into(), 15.into());
+        let interval = Interval::maximal();
+        store.insert(hash, parent, interval, 5).unwrap();
+        let height = store.append_child(hash, 31.into()).unwrap();
+        assert_eq!(height, 5);
+        let children = store.get_children(hash).unwrap();
+        println!("{children:?}");
+        store.get_interval(7.into()).unwrap();
+        println!("{children:?}");
+    }
+}
diff --git a/flexidag/dag/src/consensusdb/consensus_relations.rs b/flexidag/dag/src/consensusdb/consensus_relations.rs
new file mode 100644
index 0000000000..d54f2bd50d
--- /dev/null
+++ b/flexidag/dag/src/consensusdb/consensus_relations.rs
@@ -0,0 +1,240 @@
+use super::schema::{KeyCodec, ValueCodec};
+use super::{
+    db::DBStorage,
+    prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError},
+};
+use crate::define_schema;
+use rocksdb::WriteBatch;
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::{BlockHashes, BlockLevel};
+use std::sync::Arc;
+
+/// Reader API for `RelationsStore`.
+pub trait RelationsStoreReader {
+    fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError>;
+    fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError>;
+    fn has(&self, hash: Hash) -> Result<bool, StoreError>;
+}
+
+/// Write API for `RelationsStore`. `insert` also appends the new block to the
+/// children arrays of previously added parents, so writes are not
+/// append-only and thus need to be guarded.
+pub trait RelationsStore: RelationsStoreReader { + /// Inserts `parents` into a new store entry for `hash`, and for each `parent ∈ parents` adds `hash` to `parent.children` + fn insert(&self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>; +} + +pub(crate) const PARENTS_CF: &str = "block-parents"; +pub(crate) const CHILDREN_CF: &str = "block-children"; + +define_schema!(RelationParent, Hash, Arc>, PARENTS_CF); +define_schema!(RelationChildren, Hash, Arc>, CHILDREN_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. +#[derive(Clone)] +pub struct DbRelationsStore { + db: Arc, + level: BlockLevel, + parents_access: CachedDbAccess, + children_access: CachedDbAccess, +} + +impl DbRelationsStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: usize) -> Self { + Self { + db: Arc::clone(&db), + level, + parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size), + children_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: usize) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &mut self, + batch: &mut WriteBatch, + hash: Hash, + parents: BlockHashes, + ) -> Result<(), StoreError> { + if self.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(BatchDbWriter::new(batch), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + BatchDbWriter::new(batch), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + BatchDbWriter::new(batch), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +impl RelationsStoreReader for DbRelationsStore { + fn get_parents(&self, hash: Hash) -> Result { + self.parents_access.read(hash) + } + + fn get_children(&self, hash: Hash) -> Result { + self.children_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.parents_access.has(hash)? 
{ + debug_assert!(self.children_access.has(hash)?); + Ok(true) + } else { + Ok(false) + } + } +} + +impl RelationsStore for DbRelationsStore { + /// See `insert_batch` as well + /// TODO: use one function with DbWriter for both this function and insert_batch + fn insert(&self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + if self.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(DirectDbWriter::new(&self.db), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + DirectDbWriter::new(&self.db), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + DirectDbWriter::new(&self.db), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; + + #[test] + fn test_db_relations_store() { + let db_tempdir = tempfile::tempdir().unwrap(); + let config = FlexiDagStorageConfig::new(); + + let db = FlexiDagStorage::create_from_path(db_tempdir.path(), config) + .expect("failed to create flexidag storage"); + test_relations_store(db.relations_store); + } + + fn test_relations_store(store: T) { + let parents = [ + (1, vec![]), + (2, vec![1]), + (3, vec![1]), + (4, vec![2, 3]), + (5, vec![1, 4]), + ]; + for (i, vec) in parents.iter().cloned() { + store + .insert( + i.into(), + BlockHashes::new(vec.iter().copied().map(Hash::from).collect()), + ) + .unwrap(); + } + + let expected_children = [ + (1, vec![2, 3, 5]), + (2, vec![4]), + (3, vec![4]), + (4, vec![5]), + (5, vec![]), + ]; + for (i, vec) in expected_children { + assert!(store + .get_children(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + + for (i, vec) in parents { + assert!(store + .get_parents(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + } +} diff --git a/flexidag/dag/src/consensusdb/db.rs b/flexidag/dag/src/consensusdb/db.rs new file mode 100644 index 0000000000..5a3fef3066 --- /dev/null +++ b/flexidag/dag/src/consensusdb/db.rs @@ -0,0 +1,97 @@ +use super::{ + consenses_state::{DbDagStateStore, DAG_STATE_STORE_CF}, + error::StoreError, + schemadb::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, + COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, + HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, + }, +}; +use starcoin_config::{RocksdbConfig, StorageConfig}; +pub(crate) use starcoin_storage::db_storage::DBStorage; +use std::{path::Path, sync::Arc}; + +#[derive(Clone)] +pub struct FlexiDagStorage { + pub ghost_dag_store: DbGhostdagStore, + pub header_store: DbHeadersStore, + pub reachability_store: DbReachabilityStore, + pub relations_store: DbRelationsStore, + pub state_store: DbDagStateStore, +} + +#[derive(Clone)] +pub struct FlexiDagStorageConfig { + pub cache_size: usize, + pub rocksdb_config: RocksdbConfig, +} +impl Default for FlexiDagStorageConfig { + fn default() -> Self { + Self { + cache_size: 1, + rocksdb_config: Default::default(), + } + } +} +impl FlexiDagStorageConfig { + pub fn new() -> Self { + FlexiDagStorageConfig::default() + } + + pub fn create_with_params(cache_size: usize, rocksdb_config: 
RocksdbConfig) -> Self { + Self { + cache_size, + rocksdb_config, + } + } +} + +impl From for FlexiDagStorageConfig { + fn from(value: StorageConfig) -> Self { + Self { + cache_size: value.cache_size(), + rocksdb_config: value.rocksdb_config(), + } + } +} + +impl FlexiDagStorage { + /// Creates or loads an existing storage from the provided directory path. + pub fn create_from_path>( + db_path: P, + config: FlexiDagStorageConfig, + ) -> Result { + let db = Arc::new( + DBStorage::open_with_cfs( + db_path, + vec![ + // consensus headers + HEADERS_STORE_CF, + COMPACT_HEADER_DATA_STORE_CF, + // consensus relations + PARENTS_CF, + CHILDREN_CF, + // consensus reachability + REACHABILITY_DATA_CF, + // consensus ghostdag + GHOST_DAG_STORE_CF, + COMPACT_GHOST_DAG_STORE_CF, + DAG_STATE_STORE_CF, + ], + false, + config.rocksdb_config, + None, + ) + .map_err(|e| StoreError::DBIoError(e.to_string()))?, + ); + + Ok(Self { + ghost_dag_store: DbGhostdagStore::new(db.clone(), 1, config.cache_size), + + header_store: DbHeadersStore::new(db.clone(), config.cache_size), + reachability_store: DbReachabilityStore::new(db.clone(), config.cache_size), + relations_store: DbRelationsStore::new(db.clone(), 1, config.cache_size), + state_store: DbDagStateStore::new(db, config.cache_size), + }) + } +} diff --git a/flexidag/dag/src/consensusdb/error.rs b/flexidag/dag/src/consensusdb/error.rs new file mode 100644 index 0000000000..ec8be5527e --- /dev/null +++ b/flexidag/dag/src/consensusdb/error.rs @@ -0,0 +1,80 @@ +use starcoin_crypto::HashValue; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum StoreError { + #[error("key {0} not found in store")] + KeyNotFound(String), + + #[error("key {0} already exists in store")] + KeyAlreadyExists(String), + + #[error("column family {0} not exist in db")] + CFNotExist(String), + + #[error("IO error {0}")] + DBIoError(String), + + #[error("rocksdb error {0}")] + DbError(#[from] rocksdb::Error), + + #[error("encode error {0}")] + EncodeError(String), + + #[error("decode error {0}")] + DecodeError(String), + + #[error("ghostdag {0} duplicate blocks")] + DAGDupBlocksError(String), + + #[error("max blue work not found")] + MaxBlueworkNotFound, + + #[error("blue score overflow {0}")] + BlueScoreOverflow(String), + + #[error("blue anticore size overflow, the current size is {0}")] + BlueAnticoreSizeOverflow(String), + + #[error("anticore size not found")] + AnticoreSizeNotFound, + + #[error("k overflow, the current value is {0}")] + KOverflow(String), + + #[error("the block hash value {0} not found")] + HashValueNotFound(HashValue), + + #[error("invalid start({0}) and end({1}) interval")] + InvalidInterval(u64, u64), +} + +pub type StoreResult = std::result::Result; + +pub trait StoreResultExtensions { + fn unwrap_option(self) -> Option; +} + +impl StoreResultExtensions for StoreResult { + fn unwrap_option(self) -> Option { + match self { + Ok(value) => Some(value), + Err(StoreError::KeyNotFound(_)) => None, + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} + +pub trait StoreResultEmptyTuple { + fn unwrap_and_ignore_key_already_exists(self); +} + +impl StoreResultEmptyTuple for StoreResult<()> { + fn unwrap_and_ignore_key_already_exists(self) { + match self { + Ok(_) => (), + Err(StoreError::KeyAlreadyExists(_)) => (), + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} diff --git a/flexidag/dag/src/consensusdb/item.rs b/flexidag/dag/src/consensusdb/item.rs new file mode 100644 index 0000000000..e4a85426f9 --- /dev/null +++ 
b/flexidag/dag/src/consensusdb/item.rs @@ -0,0 +1,80 @@ +use super::prelude::DbWriter; +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; +use parking_lot::RwLock; +use starcoin_storage::storage::RawDBStorage; +use std::sync::Arc; + +/// A cached DB item with concurrency support +#[derive(Clone)] +pub struct CachedDbItem { + db: Arc, + key: S::Key, + cached_item: Arc>>, +} + +impl CachedDbItem { + pub fn new(db: Arc, key: S::Key) -> Self { + Self { + db, + key, + cached_item: Arc::new(RwLock::new(None)), + } + } + + pub fn read(&self) -> Result { + if let Some(item) = self.cached_item.read().clone() { + return Ok(item); + } + if let Some(slice) = self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? + { + let item = S::Value::decode_value(&slice)?; + *self.cached_item.write() = Some(item.clone()); + Ok(item) + } else { + Err(StoreError::KeyNotFound( + String::from_utf8(self.key.encode_key()?) + .unwrap_or_else(|_| ("unrecoverable key string").to_string()), + )) + } + } + + pub fn write(&mut self, mut writer: impl DbWriter, item: &S::Value) -> Result<(), StoreError> { + *self.cached_item.write() = Some(item.clone()); + writer.put::(&self.key, item)?; + Ok(()) + } + + pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> +where { + *self.cached_item.write() = None; + writer.delete::(&self.key)?; + Ok(()) + } + + pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result + where + F: Fn(S::Value) -> S::Value, + { + let mut guard = self.cached_item.write(); + let mut item = if let Some(item) = guard.take() { + item + } else if let Some(slice) = self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? + { + S::Value::decode_value(&slice)? + } else { + return Err(StoreError::KeyNotFound(format!("{:?}", self.key))); + }; + + item = op(item); // Apply the update op + *guard = Some(item.clone()); + writer.put::(&self.key, &item)?; + Ok(item) + } +} diff --git a/flexidag/dag/src/consensusdb/mod.rs b/flexidag/dag/src/consensusdb/mod.rs new file mode 100644 index 0000000000..331f288847 --- /dev/null +++ b/flexidag/dag/src/consensusdb/mod.rs @@ -0,0 +1,32 @@ +mod access; +mod cache; +pub mod consenses_state; +mod consensus_ghostdag; +mod consensus_header; +mod consensus_reachability; +pub mod consensus_relations; +mod db; +mod error; +mod item; +pub mod schema; +mod writer; + +pub mod prelude { + use super::{db, error}; + + pub use super::{ + access::CachedDbAccess, + cache::DagCache, + item::CachedDbItem, + writer::{BatchDbWriter, DbWriter, DirectDbWriter}, + }; + pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; + pub use error::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; +} + +pub mod schemadb { + pub use super::{ + consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, + consensus_relations::*, + }; +} diff --git a/flexidag/dag/src/consensusdb/schema.rs b/flexidag/dag/src/consensusdb/schema.rs new file mode 100644 index 0000000000..502ee9c8c7 --- /dev/null +++ b/flexidag/dag/src/consensusdb/schema.rs @@ -0,0 +1,40 @@ +use super::error::StoreError; +use core::hash::Hash; +use std::fmt::Debug; +use std::result::Result; + +pub trait KeyCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. 
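Editorial note: a minimal sketch of the read-through/write-through pattern that `CachedDbItem` above implements, with the RocksDB column family replaced by an in-memory cell. `CachedCell` and its fields are hypothetical stand-ins, and `std::sync::RwLock` is used here instead of the crate's `parking_lot` lock:

    use std::sync::{Arc, RwLock};

    struct CachedCell {
        backing: RwLock<Option<String>>,      // stands in for the DB column family
        cached: Arc<RwLock<Option<String>>>,  // shared cache, as in CachedDbItem
    }

    impl CachedCell {
        fn read(&self) -> Option<String> {
            if let Some(v) = self.cached.read().unwrap().clone() {
                return Some(v); // cache hit: the backing store is never touched
            }
            let v = self.backing.read().unwrap().clone()?; // miss: fall back to the DB
            *self.cached.write().unwrap() = Some(v.clone()); // and populate the cache
            Some(v)
        }

        fn write(&self, v: String) {
            *self.cached.write().unwrap() = Some(v.clone()); // cache first,
            *self.backing.write().unwrap() = Some(v);        // then persist
        }
    }

    fn main() {
        let cell = CachedCell {
            backing: RwLock::new(Some("persisted".to_string())),
            cached: Arc::new(RwLock::new(None)),
        };
        assert_eq!(cell.read().as_deref(), Some("persisted")); // miss, then cached
        cell.write("updated".to_string());
        assert_eq!(cell.read().as_deref(), Some("updated"));   // hit
    }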
+ fn encode_key(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_key(data: &[u8]) -> Result; +} + +pub trait ValueCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. + fn encode_value(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_value(data: &[u8]) -> Result; +} + +pub trait Schema: Debug + Send + Sync + 'static { + const COLUMN_FAMILY: &'static str; + + type Key: KeyCodec + Hash + Eq + Default; + type Value: ValueCodec + Default + Clone; +} + +#[macro_export] +macro_rules! define_schema { + ($schema_type: ident, $key_type: ty, $value_type: ty, $cf_name: expr) => { + #[derive(Clone, Debug)] + pub(crate) struct $schema_type; + + impl $crate::consensusdb::schema::Schema for $schema_type { + type Key = $key_type; + type Value = $value_type; + + const COLUMN_FAMILY: &'static str = $cf_name; + } + }; +} diff --git a/flexidag/dag/src/consensusdb/writer.rs b/flexidag/dag/src/consensusdb/writer.rs new file mode 100644 index 0000000000..717d7d7e1c --- /dev/null +++ b/flexidag/dag/src/consensusdb/writer.rs @@ -0,0 +1,75 @@ +use rocksdb::WriteBatch; +use starcoin_storage::storage::InnerStore; + +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; + +/// Abstraction over direct/batched DB writing +pub trait DbWriter { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError>; + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError>; +} + +pub struct DirectDbWriter<'a> { + db: &'a DBStorage, +} + +impl<'a> DirectDbWriter<'a> { + pub fn new(db: &'a DBStorage) -> Self { + Self { db } + } +} + +impl DbWriter for DirectDbWriter<'_> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let bin_key = key.encode_key()?; + let bin_data = value.encode_value()?; + self.db + .put(S::COLUMN_FAMILY, bin_key, bin_data) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } + + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; + self.db + .remove(S::COLUMN_FAMILY, key) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } +} + +pub struct BatchDbWriter<'a> { + batch: &'a mut WriteBatch, +} + +impl<'a> BatchDbWriter<'a> { + pub fn new(batch: &'a mut WriteBatch) -> Self { + Self { batch } + } +} + +impl DbWriter for BatchDbWriter<'_> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let key = key.encode_key()?; + let value = value.encode_value()?; + self.batch.put(key, value); + Ok(()) + } + + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; + self.batch.delete(key); + Ok(()) + } +} + +impl DbWriter for &mut T { + #[inline] + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + (*self).put::(key, value) + } + + #[inline] + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + (*self).delete::(key) + } +} diff --git a/flexidag/dag/src/ghostdag/mergeset.rs b/flexidag/dag/src/ghostdag/mergeset.rs new file mode 100644 index 0000000000..db3f617dda --- /dev/null +++ b/flexidag/dag/src/ghostdag/mergeset.rs @@ -0,0 +1,72 @@ +use super::protocol::GhostdagManager; +use crate::consensusdb::prelude::StoreError; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::reachability::reachability_service::ReachabilityService; +use starcoin_crypto::HashValue as Hash; +use 
starcoin_types::blockhash::BlockHashSet; +use std::collections::VecDeque; + +impl< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager<T, S, U, V> +{ + pub fn ordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) -> Result<Vec<Hash>, StoreError> { + self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) + } + + pub fn unordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) -> BlockHashSet { + let mut queue: VecDeque<_> = parents + .iter() + .copied() + .filter(|p| p != &selected_parent) + .collect(); + let mut mergeset: BlockHashSet = queue.iter().copied().collect(); + let mut selected_parent_past = BlockHashSet::new(); + + while let Some(current) = queue.pop_front() { + let current_parents = self + .relations_store + .get_parents(current) + .unwrap_or_else(|err| { + panic!("failed to get parents of {current} during mergeset traversal: {err:?}"); + }); + + // For each parent of the current block we check whether it is in the past of the selected parent. If not, + // we add it to the resulting merge-set and queue it for further processing. + for parent in current_parents.iter() { + if mergeset.contains(parent) { + continue; + } + + if selected_parent_past.contains(parent) { + continue; + } + + if self + .reachability_service + .is_dag_ancestor_of(*parent, selected_parent) + { + selected_parent_past.insert(*parent); + continue; + } + + mergeset.insert(*parent); + queue.push_back(*parent); + } + } + + mergeset + } +} diff --git a/flexidag/dag/src/ghostdag/mod.rs b/flexidag/dag/src/ghostdag/mod.rs new file mode 100644 index 0000000000..51a2c8fc82 --- /dev/null +++ b/flexidag/dag/src/ghostdag/mod.rs @@ -0,0 +1,4 @@ +pub mod mergeset; +pub mod protocol; + +mod util; diff --git a/flexidag/dag/src/ghostdag/protocol.rs b/flexidag/dag/src/ghostdag/protocol.rs new file mode 100644 index 0000000000..f99b91dd97 --- /dev/null +++ b/flexidag/dag/src/ghostdag/protocol.rs @@ -0,0 +1,373 @@ +use super::util::Refs; +use crate::consensusdb::prelude::StoreError; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::reachability::reachability_service::ReachabilityService; +use crate::types::{ghostdata::GhostdagData, ordering::*}; +use anyhow::{Context, Result}; +use bcs_ext::BCSCodec; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::block::BlockHeader; +use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; +use std::sync::Arc; + +#[derive(Clone)] +pub struct GhostdagManager< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, +> { + pub(super) k: KType, + pub(super) ghostdag_store: T, + pub(super) relations_store: S, + pub(super) headers_store: V, + pub(super) reachability_service: U, +} + +impl< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager<T, S, U, V> +{ + pub fn new( + k: KType, + ghostdag_store: T, + relations_store: S, + headers_store: V, + reachability_service: U, + ) -> Self { + Self { + k, + ghostdag_store, + relations_store, + reachability_service, + headers_store, + } + } + + pub fn genesis_ghostdag_data(&self, genesis: &BlockHeader) -> GhostdagData { + GhostdagData::new( + 0, + genesis.difficulty(), + Hash::sha3_256_of( + &[genesis.parent_hash(), genesis.id()] + .encode() + .expect("failed to encode hash for dag genesis and its parent"), + ), + 
BlockHashes::new(vec![]), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + ) + } + + pub fn origin_ghostdag_data(&self) -> Arc { + Arc::new(GhostdagData::new( + 0, + Default::default(), + 0.into(), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + )) + } + + pub fn check_ancestor_of(&self, ancestor: Hash, descendant: Vec) -> anyhow::Result { + self.reachability_service + .is_dag_ancestor_of_any_result(ancestor, &mut descendant.into_iter()) + .map_err(|e| e.into()) + } + + pub fn find_selected_parent( + &self, + parents: impl IntoIterator, + ) -> Result { + Ok(parents + .into_iter() + .map(|parent| match self.ghostdag_store.get_blue_work(parent) { + Ok(blue_work) => Ok(SortableBlock { + hash: parent, + blue_work, + }), + Err(e) => Err(e), + }) + .collect::, StoreError>>()? + .into_iter() + .max() + .ok_or(StoreError::MaxBlueworkNotFound)? + .hash) + } + + /// Runs the GHOSTDAG protocol and calculates the block GhostdagData by the given parents. + /// The function calculates mergeset blues by iterating over the blocks in + /// the anticone of the new block selected parent (which is the parent with the + /// highest blue work) and adds any block to the blue set if by adding + /// it these conditions will not be violated: + /// + /// 1) |anticone-of-candidate-block ∩ blue-set-of-new-block| ≤ K + /// + /// 2) For every blue block in blue-set-of-new-block: + /// |(anticone-of-blue-block ∩ blue-set-new-block) ∪ {candidate-block}| ≤ K. + /// We validate this condition by maintaining a map blues_anticone_sizes for + /// each block which holds all the blue anticone sizes that were affected by + /// the new added blue blocks. + /// So to find out what is |anticone-of-blue ∩ blue-set-of-new-block| we just iterate in + /// the selected parent chain of the new block until we find an existing entry in + /// blues_anticone_sizes. + /// + /// For further details see the article https://eprint.iacr.org/2018/104.pdf + pub fn ghostdag(&self, parents: &[Hash]) -> Result { + assert!( + !parents.is_empty(), + "genesis must be added via a call to init" + ); + // Run the GHOSTDAG parent selection algorithm + let selected_parent = self.find_selected_parent(parents.iter().copied())?; + // Initialize new GHOSTDAG block data with the selected parent + let mut new_block_data = GhostdagData::new_with_selected_parent(selected_parent, self.k); + // Get the mergeset in consensus-agreed topological order (topological here means forward in time from blocks to children) + let ordered_mergeset = + self.ordered_mergeset_without_selected_parent(selected_parent, parents)?; + + for blue_candidate in ordered_mergeset.iter().cloned() { + let coloring = self.check_blue_candidate(&new_block_data, blue_candidate)?; + + if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { + // No k-cluster violation found, we can now set the candidate block as blue + new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); + } else { + new_block_data.add_red(blue_candidate); + } + } + + let blue_score = self + .ghostdag_store + .get_blue_score(selected_parent)? 
+ .checked_add(new_block_data.mergeset_blues.len() as u64) + .ok_or_else(|| { + StoreError::BlueScoreOverflow(format!( + "{:?}", + new_block_data.mergeset_blues.len() as u64 + )) + })?; + + let added_blue_work: BlueWorkType = new_block_data + .mergeset_blues + .iter() + .cloned() + .map(|hash| { + self.headers_store + .get_difficulty(hash) + .unwrap_or_else(|_| 0.into()) + }) + .sum(); + + let blue_work = self + .ghostdag_store + .get_blue_work(selected_parent)? + .checked_add(added_blue_work) + .ok_or_else(|| StoreError::BlueScoreOverflow(format!("{added_blue_work:?}")))?; // TODO: handle overflow + + new_block_data.finalize_score_and_work(blue_score, blue_work); + + Ok(new_block_data) + } + + fn check_blue_candidate_with_chain_block( + &self, + new_block_data: &GhostdagData, + chain_block: &ChainBlock, + blue_candidate: Hash, + candidate_blues_anticone_sizes: &mut BlockHashMap, + candidate_blue_anticone_size: &mut KType, + ) -> Result { + // If blue_candidate is in the future of chain_block, it means + // that all remaining blues are in the past of chain_block and thus + // in the past of blue_candidate. In this case we know for sure that + // the anticone of blue_candidate will not exceed K, and we can mark + // it as blue. + // + // The new block is always in the future of blue_candidate, so there's + // no point in checking it. + + // We check if chain_block is not the new block by checking if it has a hash. + if let Some(hash) = chain_block.hash { + if self + .reachability_service + .is_dag_ancestor_of(hash, blue_candidate) + { + return Ok(ColoringState::Blue); + } + } + + for &block in chain_block.data.mergeset_blues.iter() { + // Skip blocks that exist in the past of blue_candidate. + if self + .reachability_service + .is_dag_ancestor_of(block, blue_candidate) + { + continue; + } + + candidate_blues_anticone_sizes + .insert(block, self.blue_anticone_size(block, new_block_data)?); + + *candidate_blue_anticone_size = (*candidate_blue_anticone_size) + .checked_add(1) + .ok_or_else(|| { + StoreError::BlueAnticoreSizeOverflow(format!( + "{:?}", + *candidate_blue_anticone_size + )) + })?; + if *candidate_blue_anticone_size > self.k { + // k-cluster violation: The candidate's blue anticone exceeded k + return Ok(ColoringState::Red); + } + + if *candidate_blues_anticone_sizes + .get(&block) + .ok_or(StoreError::AnticoreSizeNotFound)? + == self.k + { + // k-cluster violation: A block in candidate's blue anticone already + // has k blue blocks in its own anticone + return Ok(ColoringState::Red); + } + + // This is a sanity check that validates that a blue + // block's blue anticone is not already larger than K. + assert!( + *candidate_blues_anticone_sizes + .get(&block) + .ok_or(StoreError::AnticoreSizeNotFound)? + <= self.k, + "found blue anticone larger than K" + ); + } + + Ok(ColoringState::Pending) + } + + /// Returns the blue anticone size of `block` from the worldview of `context`. 
+ /// Expects `block` to be in the blue set of `context` + fn blue_anticone_size(&self, block: Hash, context: &GhostdagData) -> Result { + let mut current_blues_anticone_sizes = HashKTypeMap::clone(&context.blues_anticone_sizes); + let mut current_selected_parent = context.selected_parent; + loop { + if let Some(size) = current_blues_anticone_sizes.get(&block) { + return Ok(*size); + } + /* TODO: consider refactor it + if current_selected_parent == self.genesis_hash + || current_selected_parent == Hash::new(blockhash::ORIGIN) + { + panic!("block {block} is not in blue set of the given context"); + } + */ + current_blues_anticone_sizes = self + .ghostdag_store + .get_blues_anticone_sizes(current_selected_parent)?; + current_selected_parent = self + .ghostdag_store + .get_selected_parent(current_selected_parent)?; + } + } + + pub fn check_blue_candidate( + &self, + new_block_data: &GhostdagData, + blue_candidate: Hash, + ) -> Result { + // The maximum length of new_block_data.mergeset_blues can be K+1 because + // it contains the selected parent. + if new_block_data.mergeset_blues.len() as KType + == self + .k + .checked_add(1) + .ok_or_else(|| StoreError::KOverflow(format!("{:?}", self.k)))? + { + return Ok(ColoringOutput::Red); + } + + let mut candidate_blues_anticone_sizes: BlockHashMap = + BlockHashMap::with_capacity(self.k as usize); + // Iterate over all blocks in the blue past of the new block that are not in the past + // of blue_candidate, and check for each one of them if blue_candidate potentially + // enlarges their blue anticone to be over K, or that they enlarge the blue anticone + // of blue_candidate to be over K. + let mut chain_block = ChainBlock { + hash: None, + data: new_block_data.into(), + }; + let mut candidate_blue_anticone_size: KType = 0; + + loop { + let state = self.check_blue_candidate_with_chain_block( + new_block_data, + &chain_block, + blue_candidate, + &mut candidate_blues_anticone_sizes, + &mut candidate_blue_anticone_size, + )?; + + match state { + ColoringState::Blue => { + return Ok(ColoringOutput::Blue( + candidate_blue_anticone_size, + candidate_blues_anticone_sizes, + )); + } + ColoringState::Red => return Ok(ColoringOutput::Red), + ColoringState::Pending => (), // continue looping + } + + chain_block = ChainBlock { + hash: Some(chain_block.data.selected_parent), + data: self + .ghostdag_store + .get_data(chain_block.data.selected_parent)? 
+ .into(), + } + } + } + + pub fn sort_blocks( + &self, + blocks: impl IntoIterator, + ) -> Result, StoreError> { + let mut sorted_blocks = blocks + .into_iter() + .map(|block| { + Ok(SortableBlock { + hash: block, + blue_work: self.ghostdag_store.get_blue_work(block)?, + }) + }) + .collect::, StoreError>>()?; + sorted_blocks.sort(); + Ok(sorted_blocks.into_iter().map(|block| block.hash).collect()) + } +} + +/// Chain block with attached ghostdag data +struct ChainBlock<'a> { + hash: Option, + // if set to `None`, signals being the new block + data: Refs<'a, GhostdagData>, +} + +/// Represents the intermediate GHOSTDAG coloring state for the current candidate +enum ColoringState { + Blue, + Red, + Pending, +} + +#[derive(Debug)] +/// Represents the final output of GHOSTDAG coloring for the current candidate +pub enum ColoringOutput { + Blue(KType, BlockHashMap), + // (blue anticone size, map of blue anticone sizes for each affected blue) + Red, +} diff --git a/flexidag/dag/src/ghostdag/util.rs b/flexidag/dag/src/ghostdag/util.rs new file mode 100644 index 0000000000..68eb4b9b31 --- /dev/null +++ b/flexidag/dag/src/ghostdag/util.rs @@ -0,0 +1,57 @@ +use std::{ops::Deref, rc::Rc, sync::Arc}; +/// Enum used to represent a concrete varying pointer type which only needs to be accessed by ref. +/// We avoid adding a `Val(T)` variant in order to keep the size of the enum minimal +pub enum Refs<'a, T> { + Ref(&'a T), + Arc(Arc), + Rc(Rc), + Box(Box), +} + +impl AsRef for Refs<'_, T> { + fn as_ref(&self) -> &T { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl Deref for Refs<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl<'a, T> From<&'a T> for Refs<'a, T> { + fn from(r: &'a T) -> Self { + Self::Ref(r) + } +} + +impl From> for Refs<'_, T> { + fn from(a: Arc) -> Self { + Self::Arc(a) + } +} + +impl From> for Refs<'_, T> { + fn from(r: Rc) -> Self { + Self::Rc(r) + } +} + +impl From> for Refs<'_, T> { + fn from(b: Box) -> Self { + Self::Box(b) + } +} diff --git a/flexidag/dag/src/lib.rs b/flexidag/dag/src/lib.rs new file mode 100644 index 0000000000..ba9f5df9ff --- /dev/null +++ b/flexidag/dag/src/lib.rs @@ -0,0 +1,16 @@ +use consensusdb::prelude::StoreError; +pub mod block_dag_config; + +pub mod blockdag; +pub mod consensusdb; +pub mod ghostdag; +pub mod reachability; +pub mod types; + +pub fn process_key_already_error(result: Result<(), StoreError>) -> Result<(), StoreError> { + if let Err(StoreError::KeyAlreadyExists(_)) = result { + Result::Ok(()) + } else { + result + } +} diff --git a/flexidag/dag/src/reachability/extensions.rs b/flexidag/dag/src/reachability/extensions.rs new file mode 100644 index 0000000000..cddcd6d6b9 --- /dev/null +++ b/flexidag/dag/src/reachability/extensions.rs @@ -0,0 +1,57 @@ +use crate::consensusdb::prelude::StoreError; +use crate::consensusdb::{prelude::StoreResult, schemadb::ReachabilityStoreReader}; +use crate::types::interval::Interval; +use starcoin_crypto::hash::HashValue as Hash; + +pub(super) trait ReachabilityStoreIntervalExtensions { + fn interval_children_capacity(&self, block: Hash) -> StoreResult; + fn interval_remaining_before(&self, block: Hash) -> StoreResult; + fn interval_remaining_after(&self, block: Hash) -> StoreResult; +} + +impl ReachabilityStoreIntervalExtensions for T { + /// Returns the reachability allocation capacity for children of `block` 
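Editorial note: a small sketch of how the `Refs` enum above lets a single read-only code path accept any pointer-like form; it assumes the `Refs` definition above is in scope (in this change it lives in the crate-private `ghostdag::util` module):

    use std::{rc::Rc, sync::Arc};

    // One function body serves all four ownership forms via Deref.
    fn describe(data: Refs<'_, String>) -> usize {
        data.len()
    }

    fn main() {
        let s = String::from("ghostdag");
        assert_eq!(describe(Refs::from(&s)), 8);
        assert_eq!(describe(Refs::from(Rc::new(s.clone()))), 8);
        assert_eq!(describe(Refs::from(Arc::new(s.clone()))), 8);
        assert_eq!(describe(Refs::from(Box::new(s))), 8);
    }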
+ fn interval_children_capacity(&self, block: Hash) -> StoreResult { + // The interval of a block should *strictly* contain the intervals of its + // tree children, hence we subtract 1 from the end of the range. + Ok(self.get_interval(block)?.decrease_end(1)) + } + + /// Returns the available interval to allocate for tree children, taken from the + /// beginning of children allocation capacity + fn interval_remaining_before(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.first() { + Some(first_child) => { + let first_alloc = self.get_interval(*first_child)?; + Ok(Interval::new( + alloc_capacity.start, + first_alloc.start.checked_sub(1).unwrap(), + )) + } + None => Ok(alloc_capacity), + } + } + + /// Returns the available interval to allocate for tree children, taken from the + /// end of children allocation capacity + fn interval_remaining_after(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.last() { + Some(last_child) => { + let last_alloc = self.get_interval(*last_child)?; + let start = last_alloc.end.checked_add(1).unwrap(); + let end = alloc_capacity.end; + let check = start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap(); // TODO: make sure this is actually debug-only + if !check { + return Err(StoreError::InvalidInterval(start, end)); + } + Ok(Interval::new( + last_alloc.end.checked_add(1).unwrap(), + alloc_capacity.end, + )) + } + None => Ok(alloc_capacity), + } + } +} diff --git a/flexidag/dag/src/reachability/inquirer.rs b/flexidag/dag/src/reachability/inquirer.rs new file mode 100644 index 0000000000..8717efe10a --- /dev/null +++ b/flexidag/dag/src/reachability/inquirer.rs @@ -0,0 +1,357 @@ +use super::{tree::*, *}; +use crate::consensusdb::schemadb::{ReachabilityStore, ReachabilityStoreReader}; +use crate::process_key_already_error; +use crate::types::{interval::Interval, perf}; +use starcoin_crypto::{HashValue as Hash, HashValue}; + +/// Init the reachability store to match the state required by the algorithmic layer. +/// The function first checks the store for possibly being initialized already. +pub fn init(store: &mut (impl ReachabilityStore + ?Sized), origin: HashValue) -> Result<()> { + init_with_params(store, origin, Interval::maximal()) +} + +pub fn init_for_test( + store: &mut (impl ReachabilityStore + ?Sized), + origin: HashValue, + capacity: Interval, +) -> Result<()> { + init_with_params(store, origin, capacity) +} + +pub(super) fn init_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + origin: Hash, + capacity: Interval, +) -> Result<()> { + if store.has(origin)? { + return Ok(()); + } + store.init(origin, capacity)?; + Ok(()) +} + +type HashIterator<'a> = &'a mut dyn Iterator; + +/// Add a block to the DAG reachability data structures and persist using the provided `store`. 
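Editorial note: a worked example, with plain `u64` pairs standing in for `Interval` values, of the allocation arithmetic above and of the containment rule that the ancestry queries below reduce to; all numbers are illustrative:

    // `this` is a chain ancestor of `queried` iff this.interval contains queried's.
    fn contains(outer: (u64, u64), inner: (u64, u64)) -> bool {
        outer.0 <= inner.0 && inner.1 <= outer.1
    }

    fn main() {
        let parent = (1u64, 100u64);
        // interval_children_capacity subtracts 1 so containment stays *strict*.
        let capacity = (parent.0, parent.1 - 1); // (1, 99)
        let (child_a, child_b) = ((1u64, 30u64), (31u64, 60u64));
        // With children occupying (1, 30) and (31, 60), the free space at the
        // end of the capacity is what interval_remaining_after returns.
        let remaining_after = (child_b.1 + 1, capacity.1);
        assert_eq!(remaining_after, (61, 99));
        assert!(contains(capacity, child_a) && contains(capacity, child_b));
        assert!(!contains(child_a, child_b)); // siblings never contain each other
    }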
+pub fn add_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + add_block_with_params( + store, + new_block, + selected_parent, + mergeset_iterator, + None, + None, + ) +} + +fn add_block_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, + reindex_depth: Option, + reindex_slack: Option, +) -> Result<()> { + add_tree_block( + store, + new_block, + selected_parent, + reindex_depth.unwrap_or(perf::DEFAULT_REINDEX_DEPTH), + reindex_slack.unwrap_or(perf::DEFAULT_REINDEX_SLACK), + )?; + add_dag_block(store, new_block, mergeset_iterator)?; + Ok(()) +} + +fn add_dag_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + // Update the future covering set for blocks in the mergeset + for merged_block in mergeset_iterator { + insert_to_future_covering_set(store, merged_block, new_block)?; + } + Ok(()) +} + +fn insert_to_future_covering_set( + store: &mut (impl ReachabilityStore + ?Sized), + merged_block: Hash, + new_block: Hash, +) -> Result<()> { + match binary_search_descendant( + store, + store.get_future_covering_set(merged_block)?.as_slice(), + new_block, + )? { + // We expect the query to not succeed, and to only return the correct insertion index. + // The existences of a `future covering item` (`FCI`) which is a chain ancestor of `new_block` + // contradicts `merged_block ∈ mergeset(new_block)`. Similarly, the existence of an FCI + // which `new_block` is a chain ancestor of, contradicts processing order. + SearchOutput::Found(_, _) => Err(ReachabilityError::DataInconsistency), + SearchOutput::NotFound(i) => { + process_key_already_error(store.insert_future_covering_item( + merged_block, + new_block, + i, + ))?; + Ok(()) + } + } +} + +/// Hint to the reachability algorithm that `hint` is a candidate to become +/// the `virtual selected parent` (`VSP`). This might affect internal reachability heuristics such +/// as moving the reindex point. The consensus runtime is expected to call this function +/// for a new header selected tip which is `header only` / `pending UTXO verification`, or for a completely resolved `VSP`. +pub fn hint_virtual_selected_parent( + store: &mut (impl ReachabilityStore + ?Sized), + hint: Hash, +) -> Result<()> { + try_advancing_reindex_root( + store, + hint, + perf::DEFAULT_REINDEX_DEPTH, + perf::DEFAULT_REINDEX_SLACK, + ) +} + +/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). +/// Note that this results in `false` if `this == queried` +pub fn is_strict_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .strictly_contains(store.get_interval(queried)?)) +} + +/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). +/// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. +pub fn is_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .contains(store.get_interval(queried)?)) +} + +/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). +/// Note: this method will return true if `this == queried`. 
+/// The complexity of this method is O(log(|future_covering_set(this)|)) +pub fn is_dag_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + // First, check if `this` is a chain ancestor of queried + if is_chain_ancestor_of(store, this, queried)? { + return Ok(true); + } + // Otherwise, use previously registered future blocks to complete the + // DAG reachability test + match binary_search_descendant( + store, + store.get_future_covering_set(this)?.as_slice(), + queried, + )? { + SearchOutput::Found(_, _) => Ok(true), + SearchOutput::NotFound(_) => Ok(false), + } +} + +/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. +pub fn get_next_chain_ancestor( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + if descendant == ancestor { + // The next ancestor does not exist + return Err(ReachabilityError::BadQuery); + } + if !is_strict_chain_ancestor_of(store, ancestor, descendant)? { + // `ancestor` isn't actually a chain ancestor of `descendant`, so by def + // we cannot find the next ancestor as well + return Err(ReachabilityError::BadQuery); + } + + get_next_chain_ancestor_unchecked(store, descendant, ancestor) +} + +/// Note: it is important to keep the unchecked version for internal module use, +/// since in some scenarios during reindexing `descendant` might have a modified +/// interval which was not propagated yet. +pub(super) fn get_next_chain_ancestor_unchecked( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + match binary_search_descendant(store, store.get_children(ancestor)?.as_slice(), descendant)? { + SearchOutput::Found(hash, _) => Ok(hash), + SearchOutput::NotFound(_) => Err(ReachabilityError::BadQuery), + } +} + +enum SearchOutput { + NotFound(usize), // `usize` is the position to insert at + Found(Hash, usize), +} + +fn binary_search_descendant( + store: &(impl ReachabilityStoreReader + ?Sized), + ordered_hashes: &[Hash], + descendant: Hash, +) -> Result { + if cfg!(debug_assertions) { + // This is a linearly expensive assertion, keep it debug only + assert_hashes_ordered(store, ordered_hashes); + } + + // `Interval::end` represents the unique number allocated to this block + let point = store.get_interval(descendant)?.end; + + // We use an `unwrap` here since otherwise we need to implement `binary_search` + // ourselves, which is not worth the effort given that this would be an unrecoverable + // error anyhow + match ordered_hashes.binary_search_by_key(&point, |c| store.get_interval(*c).unwrap().start) { + Ok(i) => Ok(SearchOutput::Found(ordered_hashes[i], i)), + Err(i) => { + // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), + // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` + if i > 0 + && is_chain_ancestor_of( + store, + ordered_hashes[i.checked_sub(1).unwrap()], + descendant, + )? 
+ { + Ok(SearchOutput::Found( + ordered_hashes[i.checked_sub(1).unwrap()], + i.checked_sub(1).unwrap(), + )) + } else { + Ok(SearchOutput::NotFound(i)) + } + } + } +} + +fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordered_hashes: &[Hash]) { + let intervals: Vec = ordered_hashes + .iter() + .cloned() + .map(|c| store.get_interval(c).unwrap()) + .collect(); + debug_assert!(intervals + .as_slice() + .windows(2) + .all(|w| w[0].end < w[1].start)) +} + +#[cfg(test)] +mod tests { + use super::{super::tests::*, *}; + use crate::consensusdb::schemadb::MemoryReachabilityStore; + use starcoin_types::blockhash::ORIGIN; + + #[test] + fn test_add_tree_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + // Act + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 15)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()) + .add_block(9.into(), 6.into()) + .add_block(10.into(), 6.into()) + .add_block(11.into(), 6.into()); + // Assert + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_early_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + + // Act + let root: Hash = Hash::from_u64(1); + let mut builder = TreeBuilder::new_with_params(&mut store, 2, 5); + builder.init_with_params(root, Interval::maximal()); + for i in 2u64..100 { + builder.add_block(Hash::from_u64(i), Hash::from_u64(i / 2)); + } + + // Should trigger an earlier than reindex root allocation + builder.add_block(Hash::from_u64(100), Hash::from_u64(2)); + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_dag_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + let origin_hash = Hash::new(ORIGIN); + // Act + DagBuilder::new(&mut store) + .init(origin_hash) + .add_block(DagBlock::new(1.into(), vec![origin_hash])) + .add_block(DagBlock::new(2.into(), vec![1.into()])) + .add_block(DagBlock::new(3.into(), vec![1.into()])) + .add_block(DagBlock::new(4.into(), vec![2.into(), 3.into()])) + .add_block(DagBlock::new(5.into(), vec![4.into()])) + .add_block(DagBlock::new(6.into(), vec![1.into()])) + .add_block(DagBlock::new(7.into(), vec![5.into(), 6.into()])) + .add_block(DagBlock::new(8.into(), vec![1.into()])) + .add_block(DagBlock::new(9.into(), vec![1.into()])) + .add_block(DagBlock::new(10.into(), vec![7.into(), 8.into(), 9.into()])) + .add_block(DagBlock::new(11.into(), vec![1.into()])) + .add_block(DagBlock::new(12.into(), vec![11.into(), 10.into()])); + + // Assert intervals + store.validate_intervals(origin_hash).unwrap(); + + // Assert genesis + for i in 2u64..=12 { + assert!(store.in_past_of(1, i)); + } + + // Assert some futures + assert!(store.in_past_of(2, 4)); + assert!(store.in_past_of(2, 5)); + assert!(store.in_past_of(2, 7)); + assert!(store.in_past_of(5, 10)); + assert!(store.in_past_of(6, 10)); + assert!(store.in_past_of(10, 12)); + assert!(store.in_past_of(11, 12)); + + // Assert some anticones + assert!(store.are_anticone(2, 3)); + assert!(store.are_anticone(2, 6)); + assert!(store.are_anticone(3, 6)); + assert!(store.are_anticone(5, 6)); + assert!(store.are_anticone(3, 8)); + assert!(store.are_anticone(11, 2)); + assert!(store.are_anticone(11, 4)); + assert!(store.are_anticone(11, 6)); + assert!(store.are_anticone(11, 9)); + } +} diff --git 
a/flexidag/dag/src/reachability/mod.rs b/flexidag/dag/src/reachability/mod.rs new file mode 100644 index 0000000000..5635f50052 --- /dev/null +++ b/flexidag/dag/src/reachability/mod.rs @@ -0,0 +1,53 @@ +mod extensions; +pub mod inquirer; +pub mod reachability_service; +mod reindex; +pub mod relations_service; + +#[cfg(test)] +mod tests; +mod tree; + +use crate::consensusdb::prelude::StoreError; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum ReachabilityError { + #[error("data store error")] + StoreError(#[from] StoreError), + + #[error("data overflow error")] + DataOverflow(String), + + #[error("data inconsistency error")] + DataInconsistency, + + #[error("query is inconsistent")] + BadQuery, + + #[error("key not found: {0}")] + KeyNotFound(String), +} + +impl ReachabilityError { + pub fn is_key_not_found(&self) -> bool { + matches!(self, ReachabilityError::StoreError(e) if matches!(e, StoreError::KeyNotFound(_))) + } +} + +pub type Result<T> = std::result::Result<T, ReachabilityError>; + +pub trait ReachabilityResultExtensions<T> { + /// Unwraps the error into `None` if the internal error is `StoreError::KeyNotFound` or panics otherwise + fn unwrap_option(self) -> Option<T>; +} + +impl<T> ReachabilityResultExtensions<T> for Result<T> { + fn unwrap_option(self) -> Option<T> { + match self { + Ok(value) => Some(value), + Err(err) if err.is_key_not_found() => None, + Err(err) => panic!("Unexpected reachability error: {err:?}"), + } + } +} diff --git a/flexidag/dag/src/reachability/reachability_service.rs b/flexidag/dag/src/reachability/reachability_service.rs new file mode 100644 index 0000000000..4fc86705f8 --- /dev/null +++ b/flexidag/dag/src/reachability/reachability_service.rs @@ -0,0 +1,337 @@ +use super::{inquirer, Result}; +use crate::consensusdb::schemadb::ReachabilityStoreReader; +use parking_lot::RwLock; +use starcoin_crypto::{HashValue as Hash, HashValue}; +use starcoin_types::blockhash; +use std::{ops::Deref, sync::Arc}; + +pub trait ReachabilityService { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool>; + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator<Item = Hash>) -> bool; + fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool; + fn is_any_dag_ancestor_result( + &self, + list: &mut impl Iterator<Item = Hash>, + queried: Hash, + ) -> Result<bool>; + fn is_dag_ancestor_of_any_result( + &self, + this: Hash, + queried: &mut impl Iterator<Item = Hash>, + ) -> Result<bool>; + fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; +} + +/// Multi-threaded reachability service impl +#[derive(Clone)] +pub struct MTReachabilityService<T: ReachabilityStoreReader + ?Sized> { + store: Arc<RwLock<T>>, +} + +impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> { + pub fn new(store: Arc<RwLock<T>>) -> Self { + Self { store } + } +} + +impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for MTReachabilityService<T> { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_chain_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool> { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried) + } + + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool { + 
let read_guard = self.store.read(); + list.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried).unwrap()) + } + + fn is_any_dag_ancestor_result( + &self, + list: &mut impl Iterator, + queried: Hash, + ) -> Result { + let read_guard = self.store.read(); + for hash in list { + if inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried)? { + return Ok(true); + } + } + Ok(false) + } + + fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool { + let read_guard = self.store.read(); + queried.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), this, hash).unwrap()) + } + + fn is_dag_ancestor_of_any_result( + &self, + this: Hash, + queried: &mut impl Iterator, + ) -> Result { + let read_guard = self.store.read(); + queried.try_fold(false, |acc, descendant| { + if acc { + Ok(true) + } else { + inquirer::is_dag_ancestor_of(read_guard.deref(), this, descendant) + .map(|is_ancestor| acc || is_ancestor) + } + }) + } + + fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash { + let read_guard = self.store.read(); + inquirer::get_next_chain_ancestor(read_guard.deref(), descendant, ancestor).unwrap() + } +} + +impl MTReachabilityService { + /// Returns a forward iterator walking up the chain-selection tree from `from_ancestor` + /// to `to_descendant`, where `to_descendant` is included if `inclusive` is set to true. + /// + /// To skip `from_ancestor` simply apply `skip(1)`. + /// + /// The caller is expected to verify that `from_ancestor` is indeed a chain ancestor of + /// `to_descendant`, otherwise the function will panic. + pub fn forward_chain_iterator( + &self, + from_ancestor: Hash, + to_descendant: Hash, + inclusive: bool, + ) -> impl Iterator { + ForwardChainIterator::new(self.store.clone(), from_ancestor, to_descendant, inclusive) + } + + /// Returns a backward iterator walking down the selected chain from `from_descendant` + /// to `to_ancestor`, where `to_ancestor` is included if `inclusive` is set to true. + /// + /// To skip `from_descendant` simply apply `skip(1)`. + /// + /// The caller is expected to verify that `to_ancestor` is indeed a chain ancestor of + /// `from_descendant`, otherwise the function will panic. + pub fn backward_chain_iterator( + &self, + from_descendant: Hash, + to_ancestor: Hash, + inclusive: bool, + ) -> impl Iterator { + BackwardChainIterator::new(self.store.clone(), from_descendant, to_ancestor, inclusive) + } + + /// Returns the default chain iterator, walking from `from` backward down the + /// selected chain until `virtual genesis` (aka `blockhash::ORIGIN`; exclusive) + pub fn default_backward_chain_iterator(&self, from: Hash) -> impl Iterator { + BackwardChainIterator::new( + self.store.clone(), + from, + HashValue::new(blockhash::ORIGIN), + false, + ) + } +} + +/// Iterator design: we currently read-lock at each movement of the iterator. +/// Other options are to keep the read guard throughout the iterator lifetime, or +/// a compromise where the lock is released every constant number of items. 
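Editorial note: a usage fragment (not self-contained; it assumes a `store` already populated with the simple chain 1 -> 2 -> 3 via `TreeBuilder`, as in the unit tests further below) showing that the two iterators walk the same selected chain in opposite directions:

    let service = MTReachabilityService::new(Arc::new(RwLock::new(store)));
    let forward: Vec<Hash> = service
        .forward_chain_iterator(1.into(), 3.into(), true)
        .collect(); // [1, 2, 3]
    let backward: Vec<Hash> = service
        .backward_chain_iterator(3.into(), 1.into(), true)
        .collect(); // [3, 2, 1]
    assert!(forward.iter().cloned().eq(backward.iter().cloned().rev()));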
+struct BackwardChainIterator { + store: Arc>, + current: Option, + ancestor: Hash, + inclusive: bool, +} + +impl BackwardChainIterator { + fn new( + store: Arc>, + from_descendant: Hash, + to_ancestor: Hash, + inclusive: bool, + ) -> Self { + Self { + store, + current: Some(from_descendant), + ancestor: to_ancestor, + inclusive, + } + } +} + +impl Iterator for BackwardChainIterator { + type Item = Hash; + + fn next(&mut self) -> Option { + if let Some(current) = self.current { + if current == self.ancestor { + if self.inclusive { + self.current = None; + Some(current) + } else { + self.current = None; + None + } + } else { + debug_assert_ne!(current, HashValue::new(blockhash::NONE)); + let next = self.store.read().get_parent(current).unwrap(); + self.current = Some(next); + Some(current) + } + } else { + None + } + } +} + +struct ForwardChainIterator { + store: Arc>, + current: Option, + descendant: Hash, + inclusive: bool, +} + +impl ForwardChainIterator { + fn new( + store: Arc>, + from_ancestor: Hash, + to_descendant: Hash, + inclusive: bool, + ) -> Self { + Self { + store, + current: Some(from_ancestor), + descendant: to_descendant, + inclusive, + } + } +} + +impl Iterator for ForwardChainIterator { + type Item = Hash; + + fn next(&mut self) -> Option { + if let Some(current) = self.current { + if current == self.descendant { + if self.inclusive { + self.current = None; + Some(current) + } else { + self.current = None; + None + } + } else { + let next = inquirer::get_next_chain_ancestor( + self.store.read().deref(), + self.descendant, + current, + ) + .unwrap(); + self.current = Some(next); + Some(current) + } + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::consensusdb::schemadb::MemoryReachabilityStore; + use crate::reachability::tests::TreeBuilder; + use crate::types::interval::Interval; + + #[test] + fn test_forward_iterator() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + + // Act + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 15)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()) + .add_block(9.into(), 6.into()) + .add_block(10.into(), 6.into()) + .add_block(11.into(), 6.into()); + + let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); + + // Exclusive + let iter = service.forward_chain_iterator(2.into(), 10.into(), false); + + // Assert + let expected_hashes = [2u64, 3, 5, 6].map(Hash::from); + assert!(expected_hashes.iter().cloned().eq(iter)); + + // Inclusive + let iter = service.forward_chain_iterator(2.into(), 10.into(), true); + + // Assert + let expected_hashes = [2u64, 3, 5, 6, 10].map(Hash::from); + assert!(expected_hashes.iter().cloned().eq(iter)); + + // Compare backward to reversed forward + let forward_iter = service.forward_chain_iterator(2.into(), 10.into(), true); + let backward_iter: Vec = service + .backward_chain_iterator(10.into(), 2.into(), true) + .collect(); + assert!(forward_iter.eq(backward_iter.iter().cloned().rev())) + } + + #[test] + fn test_iterator_boundaries() { + // Arrange & Act + let mut store = MemoryReachabilityStore::new(); + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 5)) + .add_block(2.into(), root); + + let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); + + 
// Asserts + assert!([1u64, 2] + .map(Hash::from) + .iter() + .cloned() + .eq(service.forward_chain_iterator(1.into(), 2.into(), true))); + assert!([1u64] + .map(Hash::from) + .iter() + .cloned() + .eq(service.forward_chain_iterator(1.into(), 2.into(), false))); + assert!([2u64, 1] + .map(Hash::from) + .iter() + .cloned() + .eq(service.backward_chain_iterator(2.into(), root, true))); + assert!([2u64] + .map(Hash::from) + .iter() + .cloned() + .eq(service.backward_chain_iterator(2.into(), root, false))); + assert!(std::iter::once(root).eq(service.backward_chain_iterator(root, root, true))); + assert!(std::iter::empty::().eq(service.backward_chain_iterator(root, root, false))); + assert!(std::iter::once(root).eq(service.forward_chain_iterator(root, root, true))); + assert!(std::iter::empty::().eq(service.forward_chain_iterator(root, root, false))); + } +} diff --git a/flexidag/dag/src/reachability/reindex.rs b/flexidag/dag/src/reachability/reindex.rs new file mode 100644 index 0000000000..9bfa098807 --- /dev/null +++ b/flexidag/dag/src/reachability/reindex.rs @@ -0,0 +1,688 @@ +use super::{ + extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, +}; +use crate::consensusdb::schemadb::ReachabilityStore; +use crate::types::interval::Interval; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap}; +use std::collections::VecDeque; + +/// A struct used during reindex operations. It represents a temporary context +/// for caching subtree information during the *current* reindex operation only +pub(super) struct ReindexOperationContext<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + subtree_sizes: BlockHashMap, // Cache for subtree sizes computed during this operation + _depth: u64, + slack: u64, +} + +impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { + pub(super) fn new(store: &'a mut T, depth: u64, slack: u64) -> Self { + Self { + store, + subtree_sizes: BlockHashMap::new(), + _depth: depth, + slack, + } + } + + fn get_subtree_size(&self, block: Hash) -> Result { + Ok(*self.subtree_sizes.get(&block).ok_or_else(|| ReachabilityError::KeyNotFound(block.to_string()))?) + } + + /// Traverses the reachability subtree that's defined by the new child + /// block and reallocates reachability interval space + /// such that another reindexing is unlikely to occur shortly + /// thereafter. It does this by traversing down the reachability + /// tree until it finds a block with an interval size that's greater than + /// its subtree size. See `propagate_interval` for further details. + pub(super) fn reindex_intervals(&mut self, new_child: Hash, reindex_root: Hash) -> Result<()> { + let mut current = new_child; + + // Search for the first ancestor with sufficient interval space + loop { + let current_interval = self.store.get_interval(current)?; + self.count_subtrees(current)?; + + // `current` has sufficient space, break and propagate + if current_interval.size() >= self.get_subtree_size(current)? { + break; + } + + let parent = self.store.get_parent(current)?; + + if parent.is_none() { + // If we ended up here it means that there are more + // than 2^64 blocks, which shouldn't ever happen. + return Err(ReachabilityError::DataOverflow( + "missing tree + parent during reindexing. Theoretically, this + should only ever happen if there are more + than 2^64 blocks in the DAG." 
+ .to_string(), + )); + } + + if current == reindex_root { + // Reindex root is expected to hold enough capacity as long as there are less + // than ~2^52 blocks in the DAG, which should never happen in our lifetimes + // even if block rate per second is above 100. The calculation follows from the allocation of + // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root. + return Err(ReachabilityError::DataOverflow(format!( + "unexpected behavior: reindex root {reindex_root} is out of capacity during reindexing. + Theoretically, this should only ever happen if there are more than ~2^52 blocks in the DAG." + ))); + } + + if inquirer::is_strict_chain_ancestor_of(self.store, parent, reindex_root)? { + // In this case parent is guaranteed to have sufficient interval space, + // however we avoid reindexing the entire subtree above parent + // (which includes root and thus majority of blocks mined since) + // and use slacks along the chain up forward from parent to reindex root. + // Notes: + // 1. we set `required_allocation` = subtree size of current in order to double the + // current interval capacity + // 2. it might be the case that current is the `new_child` itself + return self.reindex_intervals_earlier_than_root( + current, + reindex_root, + parent, + self.get_subtree_size(current)?, + ); + } + + current = parent + } + + self.propagate_interval(current) + } + + /// + /// Core (BFS) algorithms used during reindexing (see `count_subtrees` and `propagate_interval` below) + /// + /// + /// count_subtrees counts the size of each subtree under this block, + /// and populates self.subtree_sizes with the results. + /// It is equivalent to the following recursive implementation: + /// + /// fn count_subtrees(&mut self, block: Hash) -> Result { + /// let mut subtree_size = 0u64; + /// for child in self.store.get_children(block)?.iter().cloned() { + /// subtree_size += self.count_subtrees(child)?; + /// } + /// self.subtree_sizes.insert(block, subtree_size + 1); + /// Ok(subtree_size + 1) + /// } + /// + /// However, we are expecting (linearly) deep trees, and so a + /// recursive stack-based approach is inefficient and will hit + /// recursion limits. Instead, the same logic was implemented + /// using a (queue-based) BFS method. At a high level, the + /// algorithm uses BFS for reaching all leaves and pushes + /// intermediate updates from leaves via parent chains until all + /// size information is gathered at the root of the operation + /// (i.e. at block). + fn count_subtrees(&mut self, block: Hash) -> Result<()> { + if self.subtree_sizes.contains_key(&block) { + return Ok(()); + } + + let mut queue = VecDeque::::from([block]); + let mut counts = BlockHashMap::::new(); + + while let Some(mut current) = queue.pop_front() { + let children = self.store.get_children(current)?; + if children.is_empty() { + // We reached a leaf + self.subtree_sizes.insert(current, 1); + } else if !self.subtree_sizes.contains_key(¤t) { + // We haven't yet calculated the subtree size of + // the current block. Add all its children to the + // queue + queue.extend(children.iter()); + continue; + } + + // We reached a leaf or a pre-calculated subtree. 
+            // Push information up
+            while current != block {
+                current = self.store.get_parent(current)?;
+
+                let count = counts.entry(current).or_insert(0);
+                let children = self.store.get_children(current)?;
+
+                *count = (*count).checked_add(1).unwrap();
+                if *count < children.len() as u64 {
+                    // Not all subtrees of the current block are ready
+                    break;
+                }
+
+                // All children of `current` have calculated their subtree size.
+                // Sum them all together and add 1 to get the subtree size of
+                // `current`.
+                let subtree_sum: u64 = children
+                    .iter()
+                    .map(|c| self.get_subtree_size(*c))
+                    .collect::<Result<Vec<u64>>>()?
+                    .into_iter()
+                    .sum();
+                self.subtree_sizes
+                    .insert(current, subtree_sum.checked_add(1).unwrap());
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Propagates a new interval using a BFS traversal.
+    /// Subtree intervals are recursively allocated according to subtree sizes and
+    /// the allocation rule in `Interval::split_exponential`.
+    fn propagate_interval(&mut self, block: Hash) -> Result<()> {
+        // Make sure subtrees are counted before propagating
+        self.count_subtrees(block)?;
+
+        let mut queue = VecDeque::<Hash>::from([block]);
+        while let Some(current) = queue.pop_front() {
+            let children = self.store.get_children(current)?;
+            if !children.is_empty() {
+                let sizes = children
+                    .iter()
+                    .map(|c| self.get_subtree_size(*c))
+                    .collect::<Result<Vec<u64>>>()?;
+                let interval = self.store.interval_children_capacity(current)?;
+                let intervals = interval.split_exponential(&sizes);
+                for (c, ci) in children.iter().copied().zip(intervals) {
+                    self.store.set_interval(c, ci)?;
+                }
+                queue.extend(children.iter());
+            }
+        }
+        Ok(())
+    }
+
+    /// This method implements the reindex algorithm for the case where the
+    /// new child node is not in reindex root's subtree. The function is expected to allocate
+    /// `required_allocation` to be added to interval of `allocation_block`. `common_ancestor` is
+    /// expected to be a direct parent of `allocation_block` and an ancestor of current `reindex_root`.
+    fn reindex_intervals_earlier_than_root(
+        &mut self,
+        allocation_block: Hash,
+        reindex_root: Hash,
+        common_ancestor: Hash,
+        required_allocation: u64,
+    ) -> Result<()> {
+        // The chosen child is: (i) child of `common_ancestor`; (ii) an
+        // ancestor of `reindex_root` or `reindex_root` itself
+        let chosen_child =
+            get_next_chain_ancestor_unchecked(self.store, reindex_root, common_ancestor)?;
+        let block_interval = self.store.get_interval(allocation_block)?;
+        let chosen_interval = self.store.get_interval(chosen_child)?;
+
+        if block_interval.start < chosen_interval.start {
+            // `allocation_block` is in the subtree before the chosen child
+            self.reclaim_interval_before(
+                allocation_block,
+                common_ancestor,
+                chosen_child,
+                reindex_root,
+                required_allocation,
+            )
+        } else {
+            // `allocation_block` is in the subtree after the chosen child
+            self.reclaim_interval_after(
+                allocation_block,
+                common_ancestor,
+                chosen_child,
+                reindex_root,
+                required_allocation,
+            )
+        }
+    }
+
+    fn reclaim_interval_before(
+        &mut self,
+        allocation_block: Hash,
+        common_ancestor: Hash,
+        chosen_child: Hash,
+        reindex_root: Hash,
+        required_allocation: u64,
+    ) -> Result<()> {
+        let mut slack_sum = 0u64;
+        let mut path_len = 0u64;
+        let mut path_slack_alloc = 0u64;
+
+        let mut current = chosen_child;
+        // Walk up the chain from common ancestor's chosen child towards reindex root
+        loop {
+            if current == reindex_root {
+                // Reached reindex root.
In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + let offset = required_allocation + .checked_add(self.slack.checked_mul(path_len).unwrap()) + .unwrap() + .checked_sub(slack_sum) + .unwrap(); + self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + + // Set the slack for each chain block to be reserved below during the chain walk-down + path_slack_alloc = self.slack; + break; + } + + let slack_before_current = self.store.interval_remaining_before(current)?.size(); + slack_sum = slack_sum.checked_add(slack_before_current).unwrap(); + + if slack_sum >= required_allocation { + // Set offset to be just enough to satisfy required allocation + let offset = slack_before_current + .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) + .unwrap(); + self.apply_interval_op(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + + break; + } + + current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; + path_len = path_len.checked_add(1).unwrap(); + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current block with an interval that is smaller. + // This is to make room for the required allocation. + loop { + current = self.store.get_parent(current)?; + if current == common_ancestor { + break; + } + + let slack_before_current = self.store.interval_remaining_before(current)?.size(); + let offset = slack_before_current.checked_sub(path_slack_alloc).unwrap(); + self.apply_interval_op(current, offset, Interval::increase_start)?; + self.offset_siblings_before(allocation_block, current, offset)?; + } + + Ok(()) + } + + fn reclaim_interval_after( + &mut self, + allocation_block: Hash, + common_ancestor: Hash, + chosen_child: Hash, + reindex_root: Hash, + required_allocation: u64, + ) -> Result<()> { + let mut slack_sum = 0u64; + let mut path_len = 0u64; + let mut path_slack_alloc = 0u64; + + let mut current = chosen_child; + // Walk up the chain from common ancestor's chosen child towards reindex root + loop { + if current == reindex_root { + // Reached reindex root. 
In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + let offset = required_allocation + .checked_add(self.slack.checked_mul(path_len).unwrap()) + .unwrap() + .checked_sub(slack_sum) + .unwrap(); + self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + + // Set the slack for each chain block to be reserved below during the chain walk-down + path_slack_alloc = self.slack; + break; + } + + let slack_after_current = self.store.interval_remaining_after(current)?.size(); + slack_sum = slack_sum.checked_add(slack_after_current).unwrap(); + + if slack_sum >= required_allocation { + // Set offset to be just enough to satisfy required allocation + let offset = slack_after_current + .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) + .unwrap(); + self.apply_interval_op(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + + break; + } + + current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; + path_len = path_len.checked_add(1).unwrap(); + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current block with an interval that is smaller. + // This is to make room for the required allocation. + loop { + current = self.store.get_parent(current)?; + if current == common_ancestor { + break; + } + + let slack_after_current = self.store.interval_remaining_after(current)?.size(); + let offset = slack_after_current.checked_sub(path_slack_alloc).unwrap(); + self.apply_interval_op(current, offset, Interval::decrease_end)?; + self.offset_siblings_after(allocation_block, current, offset)?; + } + + Ok(()) + } + + fn offset_siblings_before( + &mut self, + allocation_block: Hash, + current: Hash, + offset: u64, + ) -> Result<()> { + let parent = self.store.get_parent(current)?; + let children = self.store.get_children(parent)?; + + let (siblings_before, _) = split_children(&children, current)?; + for sibling in siblings_before.iter().cloned().rev() { + if sibling == allocation_block { + // We reached our final destination, allocate `offset` to `allocation_block` by increasing end and break + self.apply_interval_op_and_propagate( + allocation_block, + offset, + Interval::increase_end, + )?; + break; + } + // For non-`allocation_block` siblings offset the interval upwards in order to create space + self.apply_interval_op_and_propagate(sibling, offset, Interval::increase)?; + } + + Ok(()) + } + + fn offset_siblings_after( + &mut self, + allocation_block: Hash, + current: Hash, + offset: u64, + ) -> Result<()> { + let parent = self.store.get_parent(current)?; + let children = self.store.get_children(parent)?; + + let (_, siblings_after) = split_children(&children, current)?; + for sibling in siblings_after.iter().cloned() { + if sibling == allocation_block { + // We reached our final destination, allocate `offset` to `allocation_block` by decreasing only start and break + self.apply_interval_op_and_propagate( + allocation_block, + offset, + Interval::decrease_start, + )?; + break; + } + // For siblings before `allocation_block` offset the interval downwards to create space + self.apply_interval_op_and_propagate(sibling, offset, Interval::decrease)?; + } + + Ok(()) + } + + fn apply_interval_op( + &mut self, + block: Hash, + offset: u64, + op: fn(&Interval, u64) -> Interval, 
+ ) -> Result<()> { + self.store + .set_interval(block, op(&self.store.get_interval(block)?, offset))?; + Ok(()) + } + + fn apply_interval_op_and_propagate( + &mut self, + block: Hash, + offset: u64, + op: fn(&Interval, u64) -> Interval, + ) -> Result<()> { + self.store + .set_interval(block, op(&self.store.get_interval(block)?, offset))?; + self.propagate_interval(block)?; + Ok(()) + } + + /// A method for handling reindex operations triggered by moving the reindex root + pub(super) fn concentrate_interval( + &mut self, + parent: Hash, + child: Hash, + is_final_reindex_root: bool, + ) -> Result<()> { + let children = self.store.get_children(parent)?; + + // Split the `children` of `parent` to siblings before `child` and siblings after `child` + let (siblings_before, siblings_after) = split_children(&children, child)?; + + let siblings_before_subtrees_sum: u64 = + self.tighten_intervals_before(parent, siblings_before)?; + let siblings_after_subtrees_sum: u64 = + self.tighten_intervals_after(parent, siblings_after)?; + + self.expand_interval_to_chosen( + parent, + child, + siblings_before_subtrees_sum, + siblings_after_subtrees_sum, + is_final_reindex_root, + )?; + + Ok(()) + } + + pub(super) fn tighten_intervals_before( + &mut self, + parent: Hash, + children_before: &[Hash], + ) -> Result { + let sizes = children_before + .iter() + .cloned() + .map(|block| { + self.count_subtrees(block)?; + Ok(self.get_subtree_size(block)?) + }) + .collect::>>()?; + let sum = sizes.iter().sum(); + + let interval = self.store.get_interval(parent)?; + let interval_before = Interval::new( + interval.start.checked_add(self.slack).unwrap(), + interval + .start + .checked_add(self.slack) + .unwrap() + .checked_add(sum) + .unwrap() + .checked_sub(1) + .unwrap(), + ); + + for (c, ci) in children_before + .iter() + .cloned() + .zip(interval_before.split_exact(sizes.as_slice())) + { + self.store.set_interval(c, ci)?; + self.propagate_interval(c)?; + } + + Ok(sum) + } + + pub(super) fn tighten_intervals_after( + &mut self, + parent: Hash, + children_after: &[Hash], + ) -> Result { + let sizes = children_after + .iter() + .cloned() + .map(|block| { + self.count_subtrees(block)?; + Ok(self.get_subtree_size(block)?) 
+ }) + .collect::>>()?; + let sum = sizes.iter().sum(); + + let interval = self.store.get_interval(parent)?; + let interval_after = Interval::new( + interval + .end + .checked_sub(self.slack) + .unwrap() + .checked_sub(sum) + .unwrap(), + interval + .end + .checked_sub(self.slack) + .unwrap() + .checked_sub(1) + .unwrap(), + ); + + for (c, ci) in children_after + .iter() + .cloned() + .zip(interval_after.split_exact(sizes.as_slice())) + { + self.store.set_interval(c, ci)?; + self.propagate_interval(c)?; + } + + Ok(sum) + } + + pub(super) fn expand_interval_to_chosen( + &mut self, + parent: Hash, + child: Hash, + siblings_before_subtrees_sum: u64, + siblings_after_subtrees_sum: u64, + is_final_reindex_root: bool, + ) -> Result<()> { + let interval = self.store.get_interval(parent)?; + let allocation = Interval::new( + interval + .start + .checked_add(siblings_before_subtrees_sum) + .unwrap() + .checked_add(self.slack) + .unwrap(), + interval + .end + .checked_sub(siblings_after_subtrees_sum) + .unwrap() + .checked_sub(self.slack) + .unwrap() + .checked_sub(1) + .unwrap(), + ); + let current = self.store.get_interval(child)?; + + // Propagate interval only if the chosen `child` is the final reindex root AND + // the new interval doesn't contain the previous one + if is_final_reindex_root && !allocation.contains(current) { + /* + We deallocate slack on both sides as an optimization. Were we to + assign the fully allocated interval, the next time the reindex root moves we + would need to propagate intervals again. However when we do allocate slack, + next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`. + Note that below following the propagation we reassign the full `allocation` to `child`. + */ + let narrowed = Interval::new( + allocation.start.checked_add(self.slack).unwrap(), + allocation.end.checked_sub(self.slack).unwrap(), + ); + self.store.set_interval(child, narrowed)?; + self.propagate_interval(child)?; + } + + self.store.set_interval(child, allocation)?; + Ok(()) + } +} + +/// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after. 
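+/// For illustration: given children `[a, b, c, d]` and pivot `c`, the result is
+/// `(&[a, b], &[d])`; the pivot itself is excluded from both slices, and a
+/// missing pivot yields `ReachabilityError::DataInconsistency`.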
+fn split_children(children: &std::sync::Arc<Vec<Hash>>, pivot: Hash) -> Result<(&[Hash], &[Hash])> {
+    if let Some(index) = children.iter().cloned().position(|c| c == pivot) {
+        Ok((
+            &children[..index],
+            &children[index.checked_add(1).unwrap()..],
+        ))
+    } else {
+        Err(ReachabilityError::DataInconsistency)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{super::tests::*, *};
+    use crate::consensusdb::schemadb::{MemoryReachabilityStore, ReachabilityStoreReader};
+    use starcoin_types::blockhash;
+
+    #[test]
+    fn test_count_subtrees() {
+        let mut store = MemoryReachabilityStore::new();
+
+        // Arrange
+        let root: Hash = 1.into();
+        StoreBuilder::new(&mut store)
+            .add_block(root, Hash::new(blockhash::NONE))
+            .add_block(2.into(), root)
+            .add_block(3.into(), 2.into())
+            .add_block(4.into(), 2.into())
+            .add_block(5.into(), 3.into())
+            .add_block(6.into(), 5.into())
+            .add_block(7.into(), 1.into())
+            .add_block(8.into(), 6.into());
+
+        // Act
+        let mut ctx = ReindexOperationContext::new(&mut store, 10, 16);
+        ctx.count_subtrees(root).unwrap();
+
+        // Assert
+        let expected = [
+            (1u64, 8u64),
+            (2, 6),
+            (3, 4),
+            (4, 1),
+            (5, 3),
+            (6, 2),
+            (7, 1),
+            (8, 1),
+        ]
+        .iter()
+        .cloned()
+        .map(|(h, c)| (Hash::from(h), c))
+        .collect::<BlockHashMap<u64>>();
+
+        assert_eq!(expected, ctx.subtree_sizes);
+
+        // Act
+        ctx.store.set_interval(root, Interval::new(1, 8)).unwrap();
+        ctx.propagate_interval(root).unwrap();
+
+        // Assert intervals manually
+        let expected_intervals = [
+            (1u64, (1u64, 8u64)),
+            (2, (1, 6)),
+            (3, (1, 4)),
+            (4, (5, 5)),
+            (5, (1, 3)),
+            (6, (1, 2)),
+            (7, (7, 7)),
+            (8, (1, 1)),
+        ];
+        let actual_intervals = (1u64..=8)
+            .map(|i| (i, ctx.store.get_interval(i.into()).unwrap().into()))
+            .collect::<Vec<_>>();
+        assert_eq!(actual_intervals, expected_intervals);
+
+        // Assert intervals follow the general rules
+        store.validate_intervals(root).unwrap();
+    }
+}
diff --git a/flexidag/dag/src/reachability/relations_service.rs b/flexidag/dag/src/reachability/relations_service.rs
new file mode 100644
index 0000000000..755cfb49be
--- /dev/null
+++ b/flexidag/dag/src/reachability/relations_service.rs
@@ -0,0 +1,34 @@
+use crate::consensusdb::{prelude::StoreError, schemadb::RelationsStoreReader};
+use parking_lot::RwLock;
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::BlockHashes;
+use std::sync::Arc;
+/// Multi-threaded block-relations service implementation
+#[derive(Clone)]
+pub struct MTRelationsService<T> {
+    store: Arc<RwLock<Vec<T>>>,
+    level: usize,
+}
+
+impl<T> MTRelationsService<T> {
+    pub fn new(store: Arc<RwLock<Vec<T>>>, level: u8) -> Self {
+        Self {
+            store,
+            level: level as usize,
+        }
+    }
+}
+
+impl<T: RelationsStoreReader> RelationsStoreReader for MTRelationsService<T> {
+    fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        self.store.read()[self.level].get_parents(hash)
+    }
+
+    fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        self.store.read()[self.level].get_children(hash)
+    }
+
+    fn has(&self, hash: Hash) -> Result<bool, StoreError> {
+        self.store.read()[self.level].has(hash)
+    }
+}
diff --git a/flexidag/dag/src/reachability/tests.rs b/flexidag/dag/src/reachability/tests.rs
new file mode 100644
index 0000000000..8627928297
--- /dev/null
+++ b/flexidag/dag/src/reachability/tests.rs
@@ -0,0 +1,268 @@
+//!
+//! Test utils for reachability
+//!
+use super::{inquirer::*, tree::*}; +use crate::consensusdb::{ + prelude::StoreError, + schemadb::{ReachabilityStore, ReachabilityStoreReader}, +}; +use crate::types::interval::Interval; +use crate::types::perf; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}; +use std::collections::VecDeque; +use thiserror::Error; + +/// A struct with fluent API to streamline reachability store building +pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, +} + +impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { store } + } + + pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { + let parent_height = if !parent.is_none() { + self.store.append_child(parent, hash).unwrap() + } else { + 0 + }; + self.store + .insert(hash, parent, Interval::empty(), parent_height + 1) + .unwrap(); + self + } +} + +/// A struct with fluent API to streamline tree building +pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + reindex_depth: u64, + reindex_slack: u64, +} + +impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { + store, + reindex_depth: perf::DEFAULT_REINDEX_DEPTH, + reindex_slack: perf::DEFAULT_REINDEX_SLACK, + } + } + + pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self { + Self { + store, + reindex_depth, + reindex_slack, + } + } + + #[allow(dead_code)] + pub fn init(&mut self, origin: Hash) -> &mut Self { + init(self.store, origin).unwrap(); + self + } + + pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self { + init_with_params(self.store, origin, capacity).unwrap(); + self + } + + pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { + add_tree_block( + self.store, + hash, + parent, + self.reindex_depth, + self.reindex_slack, + ) + .unwrap(); + try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack) + .unwrap(); + self + } + + #[allow(dead_code)] + pub fn store(&self) -> &&'a mut T { + &self.store + } +} + +#[derive(Clone)] +pub struct DagBlock { + pub hash: Hash, + pub parents: Vec, +} + +impl DagBlock { + pub fn new(hash: Hash, parents: Vec) -> Self { + Self { hash, parents } + } +} + +/// A struct with fluent API to streamline DAG building +pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized> { + store: &'a mut T, + map: BlockHashMap, +} + +impl<'a, T: ReachabilityStore + ?Sized> DagBuilder<'a, T> { + pub fn new(store: &'a mut T) -> Self { + Self { + store, + map: BlockHashMap::new(), + } + } + + pub fn init(&mut self, origin: Hash) -> &mut Self { + init(self.store, origin).unwrap(); + self + } + + pub fn add_block(&mut self, block: DagBlock) -> &mut Self { + // Select by height (longest chain) just for the sake of internal isolated tests + let selected_parent = block + .parents + .iter() + .cloned() + .max_by_key(|p| self.store.get_height(*p).unwrap()) + .unwrap(); + let mergeset = self.mergeset(&block, selected_parent); + add_block( + self.store, + block.hash, + selected_parent, + &mut mergeset.iter().cloned(), + ) + .unwrap(); + hint_virtual_selected_parent(self.store, block.hash).unwrap(); + self.map.insert(block.hash, block); + self + } + + fn mergeset(&self, block: &DagBlock, selected_parent: Hash) -> Vec { + let mut queue: VecDeque = block + .parents + .iter() + .copied() + .filter(|p| *p != selected_parent) + .collect(); + 
let mut mergeset: BlockHashSet = queue.iter().copied().collect();
+        let mut past = BlockHashSet::new();
+
+        while let Some(current) = queue.pop_front() {
+            for parent in self.map[&current].parents.iter() {
+                if mergeset.contains(parent) || past.contains(parent) {
+                    continue;
+                }
+
+                if is_dag_ancestor_of(self.store, *parent, selected_parent).unwrap() {
+                    past.insert(*parent);
+                    continue;
+                }
+
+                mergeset.insert(*parent);
+                queue.push_back(*parent);
+            }
+        }
+        mergeset.into_iter().collect()
+    }
+
+    #[allow(dead_code)]
+    pub fn store(&self) -> &&'a mut T {
+        &self.store
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum TestError {
+    #[error("data store error")]
+    StoreError(#[from] StoreError),
+
+    #[error("empty interval")]
+    EmptyInterval(Hash, Interval),
+
+    #[error("sibling intervals are expected to be consecutive")]
+    NonConsecutiveSiblingIntervals(Interval, Interval),
+
+    #[error("child interval out of parent bounds")]
+    IntervalOutOfParentBounds {
+        parent: Hash,
+        child: Hash,
+        parent_interval: Interval,
+        child_interval: Interval,
+    },
+}
+
+pub trait StoreValidationExtensions {
+    /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers)
+    fn in_past_of(&self, block: u64, other: u64) -> bool;
+
+    /// Checks if `block` and `other` are in the anticone of each other
+    /// (creates hashes from the u64 numbers)
+    fn are_anticone(&self, block: u64, other: u64) -> bool;
+
+    /// Validates that all tree intervals match the expected interval relations
+    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>;
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> StoreValidationExtensions for T {
+    fn in_past_of(&self, block: u64, other: u64) -> bool {
+        if block == other {
+            return false;
+        }
+        let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap();
+        if res {
+            // Assert that the `future` relation is indeed asymmetric
+            assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap())
+        }
+        res
+    }
+
+    fn are_anticone(&self, block: u64, other: u64) -> bool {
+        !is_dag_ancestor_of(self, block.into(), other.into()).unwrap()
+            && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap()
+    }
+
+    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> {
+        let mut queue = VecDeque::<Hash>::from([root]);
+        while let Some(parent) = queue.pop_front() {
+            let children = self.get_children(parent)?;
+            queue.extend(children.iter());
+
+            let parent_interval = self.get_interval(parent)?;
+            if parent_interval.is_empty() {
+                return Err(TestError::EmptyInterval(parent, parent_interval));
+            }
+
+            // Verify parent-child strict relation
+            for child in children.iter().cloned() {
+                let child_interval = self.get_interval(child)?;
+                if !parent_interval.strictly_contains(child_interval) {
+                    return Err(TestError::IntervalOutOfParentBounds {
+                        parent,
+                        child,
+                        parent_interval,
+                        child_interval,
+                    });
+                }
+            }
+
+            // Iterate over consecutive siblings
+            for siblings in children.windows(2) {
+                let sibling_interval = self.get_interval(siblings[0])?;
+                let current_interval = self.get_interval(siblings[1])?;
+                if sibling_interval.end + 1 != current_interval.start {
+                    return Err(TestError::NonConsecutiveSiblingIntervals(
+                        sibling_interval,
+                        current_interval,
+                    ));
+                }
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/flexidag/dag/src/reachability/tree.rs b/flexidag/dag/src/reachability/tree.rs
new file mode 100644
index 0000000000..734e81f713
--- /dev/null
+++ b/flexidag/dag/src/reachability/tree.rs
@@ -0,0 +1,162 @@
+//!
+//! Tree-related functions internal to the module
+//!
+use super::{ + extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, + *, +}; +use crate::{consensusdb::schemadb::ReachabilityStore, process_key_already_error}; +use starcoin_crypto::HashValue as Hash; + +/// Adds `new_block` as a child of `parent` in the tree structure. If this block +/// has no remaining interval to allocate, a reindexing is triggered. When a reindexing +/// is triggered, the reindex root point is used within the reindex algorithm's logic +pub fn add_tree_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + parent: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<()> { + // Get the remaining interval capacity + let remaining = store.interval_remaining_after(parent)?; + // Append the new child to `parent.children` + let parent_height = store.append_child(parent, new_block)?; + + if remaining.is_empty() { + // Init with the empty interval. + // Note: internal logic relies on interval being this specific interval + // which comes exactly at the end of current capacity + process_key_already_error(store.insert( + new_block, + parent, + remaining, + parent_height.checked_add(1).unwrap(), + ))?; + + // Start a reindex operation (TODO: add timing) + let reindex_root = store.get_reindex_root()?; + let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); + ctx.reindex_intervals(new_block, reindex_root)?; + } else { + let allocated = remaining.split_half().0; + process_key_already_error(store.insert( + new_block, + parent, + allocated, + parent_height.checked_add(1).unwrap(), + ))?; + }; + Ok(()) +} + +/// Finds the most recent tree ancestor common to both `block` and the given `reindex root`. +/// Note that we assume that almost always the chain between the reindex root and the common +/// ancestor is longer than the chain between block and the common ancestor, hence we iterate +/// from `block`. +pub fn find_common_tree_ancestor( + store: &(impl ReachabilityStore + ?Sized), + block: Hash, + reindex_root: Hash, +) -> Result { + let mut current = block; + loop { + if is_chain_ancestor_of(store, current, reindex_root)? { + return Ok(current); + } + current = store.get_parent(current)?; + } +} + +/// Finds a possible new reindex root, based on the `current` reindex root and the selected tip `hint` +pub fn find_next_reindex_root( + store: &(impl ReachabilityStore + ?Sized), + current: Hash, + hint: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<(Hash, Hash)> { + let mut ancestor = current; + let mut next = current; + + let hint_height = store.get_height(hint)?; + + // Test if current root is ancestor of selected tip (`hint`) - if not, this is a reorg case + if !is_chain_ancestor_of(store, current, hint)? { + let current_height = store.get_height(current)?; + + // We have reindex root out of (hint) selected tip chain, however we switch chains only after a sufficient + // threshold of `reindex_slack` diff in order to address possible alternating reorg attacks. + // The `reindex_slack` constant is used as an heuristic large enough on the one hand, but + // one which will not harm performance on the other hand - given the available slack at the chain split point. + // + // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height. + // If that's the case we keep the reindex root unchanged. 
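+        // Example with the default parameters (reindex_slack = 1 << 12 = 4096):
+        // if the current root sits at height 10_000 and a reorged hint is at
+        // height 12_000, the diff of 2_000 is below the slack threshold and the
+        // reindex root is intentionally kept in place.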
+ if hint_height < current_height + || hint_height.checked_sub(current_height).unwrap() < reindex_slack + { + return Ok((current, current)); + } + + let common = find_common_tree_ancestor(store, hint, current)?; + ancestor = common; + next = common; + } + + // Iterate from ancestor towards the selected tip (`hint`) until passing the + // `reindex_window` threshold, for finding the new reindex root + loop { + let child = get_next_chain_ancestor_unchecked(store, hint, next)?; + let child_height = store.get_height(child)?; + + if hint_height < child_height { + return Err(ReachabilityError::DataInconsistency); + } + if hint_height.checked_sub(child_height).unwrap() < reindex_depth { + break; + } + next = child; + } + + Ok((ancestor, next)) +} + +/// Attempts to advance or move the current reindex root according to the +/// provided `virtual selected parent` (`VSP`) hint. +/// It is important for the reindex root point to follow the consensus-agreed chain +/// since this way it can benefit from chain-robustness which is implied by the security +/// of the ordering protocol. That is, it enjoys from the fact that all future blocks are +/// expected to elect the root subtree (by converging to the agreement to have it on the +/// selected chain). See also the reachability algorithms overview (TODO) +pub fn try_advancing_reindex_root( + store: &mut (impl ReachabilityStore + ?Sized), + hint: Hash, + reindex_depth: u64, + reindex_slack: u64, +) -> Result<()> { + // Get current root from the store + let current = store.get_reindex_root()?; + + // Find the possible new root + let (mut ancestor, next) = + find_next_reindex_root(store, current, hint, reindex_depth, reindex_slack)?; + + // No update to root, return + if current == next { + return Ok(()); + } + + // if ancestor == next { + // trace!("next reindex root is an ancestor of current one, skipping concentration.") + // } + while ancestor != next { + let child = get_next_chain_ancestor_unchecked(store, next, ancestor)?; + let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); + ctx.concentrate_interval(ancestor, child, child == next)?; + ancestor = child; + } + + // Update reindex root in the data store + store.set_reindex_root(next)?; + Ok(()) +} diff --git a/flexidag/dag/src/types/ghostdata.rs b/flexidag/dag/src/types/ghostdata.rs new file mode 100644 index 0000000000..c680172148 --- /dev/null +++ b/flexidag/dag/src/types/ghostdata.rs @@ -0,0 +1,147 @@ +use super::trusted::ExternalGhostdagData; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; +use std::sync::Arc; + +#[derive(Clone, Serialize, Deserialize, Default, Debug)] +pub struct GhostdagData { + pub blue_score: u64, + pub blue_work: BlueWorkType, + pub selected_parent: Hash, + pub mergeset_blues: BlockHashes, + pub mergeset_reds: BlockHashes, + pub blues_anticone_sizes: HashKTypeMap, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize, Copy)] +pub struct CompactGhostdagData { + pub blue_score: u64, + pub blue_work: BlueWorkType, + pub selected_parent: Hash, +} + +impl From for GhostdagData { + fn from(value: ExternalGhostdagData) -> Self { + Self { + blue_score: value.blue_score, + blue_work: value.blue_work, + selected_parent: value.selected_parent, + mergeset_blues: Arc::new(value.mergeset_blues), + mergeset_reds: Arc::new(value.mergeset_reds), + blues_anticone_sizes: Arc::new(value.blues_anticone_sizes), + } + } +} + +impl 
From<&GhostdagData> for ExternalGhostdagData {
+    fn from(value: &GhostdagData) -> Self {
+        Self {
+            blue_score: value.blue_score,
+            blue_work: value.blue_work,
+            selected_parent: value.selected_parent,
+            mergeset_blues: (*value.mergeset_blues).clone(),
+            mergeset_reds: (*value.mergeset_reds).clone(),
+            blues_anticone_sizes: (*value.blues_anticone_sizes).clone(),
+        }
+    }
+}
+
+impl GhostdagData {
+    pub fn new(
+        blue_score: u64,
+        blue_work: BlueWorkType,
+        selected_parent: Hash,
+        mergeset_blues: BlockHashes,
+        mergeset_reds: BlockHashes,
+        blues_anticone_sizes: HashKTypeMap,
+    ) -> Self {
+        Self {
+            blue_score,
+            blue_work,
+            selected_parent,
+            mergeset_blues,
+            mergeset_reds,
+            blues_anticone_sizes,
+        }
+    }
+
+    pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self {
+        let mut mergeset_blues: Vec<Hash> =
+            Vec::with_capacity(k.checked_add(1).unwrap() as usize);
+        let mut blues_anticone_sizes: BlockHashMap<KType> =
+            BlockHashMap::with_capacity(k as usize);
+        mergeset_blues.push(selected_parent);
+        blues_anticone_sizes.insert(selected_parent, 0);
+
+        Self {
+            blue_score: Default::default(),
+            blue_work: Default::default(),
+            selected_parent,
+            mergeset_blues: BlockHashes::new(mergeset_blues),
+            mergeset_reds: Default::default(),
+            blues_anticone_sizes: HashKTypeMap::new(blues_anticone_sizes),
+        }
+    }
+
+    pub fn mergeset_size(&self) -> usize {
+        self.mergeset_blues
+            .len()
+            .checked_add(self.mergeset_reds.len())
+            .unwrap()
+    }
+
+    /// Returns an iterator to the mergeset with no specified order (excluding the selected parent)
+    pub fn unordered_mergeset_without_selected_parent(&self) -> impl Iterator<Item = Hash> + '_ {
+        self.mergeset_blues
+            .iter()
+            .skip(1) // Skip the selected parent
+            .cloned()
+            .chain(self.mergeset_reds.iter().cloned())
+    }
+
+    /// Returns an iterator to the mergeset with no specified order (including the selected parent)
+    pub fn unordered_mergeset(&self) -> impl Iterator<Item = Hash> + '_ {
+        self.mergeset_blues
+            .iter()
+            .cloned()
+            .chain(self.mergeset_reds.iter().cloned())
+    }
+
+    pub fn to_compact(&self) -> CompactGhostdagData {
+        CompactGhostdagData {
+            blue_score: self.blue_score,
+            blue_work: self.blue_work,
+            selected_parent: self.selected_parent,
+        }
+    }
+
+    pub fn add_blue(
+        &mut self,
+        block: Hash,
+        blue_anticone_size: KType,
+        block_blues_anticone_sizes: &BlockHashMap<KType>,
+    ) {
+        // Add the new blue block to mergeset blues
+        BlockHashes::make_mut(&mut self.mergeset_blues).push(block);
+
+        // Get a mut ref to internal anticone size map
+        let blues_anticone_sizes = HashKTypeMap::make_mut(&mut self.blues_anticone_sizes);
+
+        // Insert the new blue block with its blue anticone size to the map
+        blues_anticone_sizes.insert(block, blue_anticone_size);
+
+        // Insert/update map entries for blocks affected by this insertion
+        for (blue, size) in block_blues_anticone_sizes {
+            blues_anticone_sizes.insert(*blue, size.checked_add(1).unwrap());
+        }
+    }
+
+    pub fn add_red(&mut self, block: Hash) {
+        // Add the new red block to mergeset reds
+        BlockHashes::make_mut(&mut self.mergeset_reds).push(block);
+    }
+
+    pub fn finalize_score_and_work(&mut self, blue_score: u64, blue_work: BlueWorkType) {
+        self.blue_score = blue_score;
+        self.blue_work = blue_work;
+    }
+}
diff --git a/flexidag/dag/src/types/interval.rs b/flexidag/dag/src/types/interval.rs
new file mode 100644
index 0000000000..0b5cc4f6e5
--- /dev/null
+++ b/flexidag/dag/src/types/interval.rs
@@ -0,0 +1,377 @@
+use serde::{Deserialize, Serialize};
+use std::fmt::{Display, Formatter};
+
+#[derive(Debug, Default, PartialEq, Eq,
Clone, Copy, Serialize, Deserialize)] +pub struct Interval { + pub start: u64, + pub end: u64, +} + +impl Display for Interval { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "[{}, {}]", self.start, self.end) + } +} + +impl From for (u64, u64) { + fn from(val: Interval) -> Self { + (val.start, val.end) + } +} + +impl Interval { + pub fn new(start: u64, end: u64) -> Self { + debug_assert!(start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap()); // TODO: make sure this is actually debug-only + Interval { start, end } + } + + pub fn empty() -> Self { + Self::new(1, 0) + } + + /// Returns the maximally allowed `u64` interval. We leave a margin of 1 from + /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any + /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1` + pub fn maximal() -> Self { + Self::new(1, u64::MAX.saturating_sub(1)) + } + + pub fn size(&self) -> u64 { + // Empty intervals are indicated by `self.end == self.start - 1`, so + // we avoid the overflow by first adding 1 + // Note: this function will panic if `self.end < self.start - 1` due to overflow + (self.end.checked_add(1).unwrap()) + .checked_sub(self.start) + .unwrap() + } + + pub fn is_empty(&self) -> bool { + self.size() == 0 + } + + pub fn increase(&self, offset: u64) -> Self { + Self::new( + self.start.checked_add(offset).unwrap(), + self.end.checked_add(offset).unwrap(), + ) + } + + pub fn decrease(&self, offset: u64) -> Self { + Self::new( + self.start.checked_sub(offset).unwrap(), + self.end.checked_sub(offset).unwrap(), + ) + } + + pub fn increase_start(&self, offset: u64) -> Self { + Self::new(self.start.checked_add(offset).unwrap(), self.end) + } + + pub fn decrease_start(&self, offset: u64) -> Self { + Self::new(self.start.checked_sub(offset).unwrap(), self.end) + } + + pub fn increase_end(&self, offset: u64) -> Self { + Self::new(self.start, self.end.checked_add(offset).unwrap()) + } + + pub fn decrease_end(&self, offset: u64) -> Self { + Self::new(self.start, self.end.checked_sub(offset).unwrap()) + } + + pub fn split_half(&self) -> (Self, Self) { + self.split_fraction(0.5) + } + + /// Splits this interval to two parts such that their + /// union is equal to the original interval and the first (left) part + /// contains the given fraction of the original interval's size. + /// Note: if the split results in fractional parts, this method rounds + /// the first part up and the last part down. + fn split_fraction(&self, fraction: f32) -> (Self, Self) { + let left_size = f32::ceil(self.size() as f32 * fraction) as u64; + + ( + Self::new( + self.start, + self.start + .checked_add(left_size) + .unwrap() + .checked_sub(1) + .unwrap(), + ), + Self::new(self.start.checked_add(left_size).unwrap(), self.end), + ) + } + + /// Splits this interval to exactly |sizes| parts where + /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly + /// equal to the interval's size. + pub fn split_exact(&self, sizes: &[u64]) -> Vec { + assert_eq!( + sizes.iter().sum::(), + self.size(), + "sum of sizes must be equal to the interval's size" + ); + let mut start = self.start; + sizes + .iter() + .map(|size| { + let interval = Self::new( + start, + start.checked_add(*size).unwrap().checked_sub(1).unwrap(), + ); + start = start.checked_add(*size).unwrap(); + interval + }) + .collect() + } + + /// Splits this interval to |sizes| parts + /// by the allocation rule described below. 
This method expects sum(sizes) + /// to be smaller or equal to the interval's size. Every part_i is + /// allocated at least sizes[i] capacity. The remaining budget is + /// split by an exponentially biased rule described below. + /// + /// This rule follows the GHOSTDAG protocol behavior where the child + /// with the largest subtree is expected to dominate the competition + /// for new blocks and thus grow the most. However, we may need to + /// add slack for non-largest subtrees in order to make CPU reindexing + /// attacks unworthy. + pub fn split_exponential(&self, sizes: &[u64]) -> Vec { + let interval_size = self.size(); + let sizes_sum = sizes.iter().sum::(); + assert!( + interval_size >= sizes_sum, + "interval's size must be greater than or equal to sum of sizes" + ); + assert!(sizes_sum > 0, "cannot split to 0 parts"); + if interval_size == sizes_sum { + return self.split_exact(sizes); + } + + // + // Add a fractional bias to every size in the provided sizes + // + + let mut remaining_bias = interval_size.checked_sub(sizes_sum).unwrap(); + let total_bias = remaining_bias as f64; + + let mut biased_sizes = Vec::::with_capacity(sizes.len()); + let exp_fractions = exponential_fractions(sizes); + for (i, fraction) in exp_fractions.iter().enumerate() { + let bias: u64 = if i == exp_fractions.len().checked_sub(1).unwrap() { + remaining_bias + } else { + remaining_bias.min(f64::round(total_bias * fraction) as u64) + }; + biased_sizes.push(sizes[i].checked_add(bias).unwrap()); + remaining_bias = remaining_bias.checked_sub(bias).unwrap(); + } + + self.split_exact(biased_sizes.as_slice()) + } + + pub fn contains(&self, other: Self) -> bool { + self.start <= other.start && other.end <= self.end + } + + pub fn strictly_contains(&self, other: Self) -> bool { + self.start <= other.start && other.end < self.end + } +} + +/// Returns a fraction for each size in sizes +/// as follows: +/// fraction[i] = 2^size[i] / sum_j(2^size[j]) +/// In the code below the above equation is divided by 2^max(size) +/// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i]) +/// we divide 1 by potentially a very large number, which will +/// result in loss of float precision. This is not a problem - all +/// numbers close to 0 bear effectively the same weight. 
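+/// Worked example: for sizes [1, 2, 3] the weights are 2^1, 2^2, 2^3 = 2, 4, 8,
+/// which sum to 14, so the returned fractions are 1/7, 2/7 and 4/7
+/// (approximately 0.14, 0.29 and 0.57).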
+fn exponential_fractions(sizes: &[u64]) -> Vec { + let max_size = sizes.iter().copied().max().unwrap_or_default(); + + let mut fractions = sizes + .iter() + .map(|s| 1f64 / 2f64.powf((max_size - s) as f64)) + .collect::>(); + + let fractions_sum = fractions.iter().sum::(); + for item in &mut fractions { + *item /= fractions_sum; + } + + fractions +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_interval_basics() { + let interval = Interval::new(101, 164); + let increased = interval.increase(10); + let decreased = increased.decrease(5); + // println!("{}", interval.clone()); + + assert_eq!(interval.start + 10, increased.start); + assert_eq!(interval.end + 10, increased.end); + + assert_eq!(interval.start + 5, decreased.start); + assert_eq!(interval.end + 5, decreased.end); + + assert_eq!(interval.size(), 64); + assert_eq!(Interval::maximal().size(), u64::MAX - 1); + assert_eq!(Interval::empty().size(), 0); + + let (empty_left, empty_right) = Interval::empty().split_half(); + assert_eq!(empty_left.size(), 0); + assert_eq!(empty_right.size(), 0); + + assert_eq!(interval.start + 10, interval.increase_start(10).start); + assert_eq!(interval.start - 10, interval.decrease_start(10).start); + assert_eq!(interval.end + 10, interval.increase_end(10).end); + assert_eq!(interval.end - 10, interval.decrease_end(10).end); + + assert_eq!(interval.end, interval.increase_start(10).end); + assert_eq!(interval.end, interval.decrease_start(10).end); + assert_eq!(interval.start, interval.increase_end(10).start); + assert_eq!(interval.start, interval.decrease_end(10).start); + + // println!("{:?}", Interval::maximal()); + // println!("{:?}", Interval::maximal().split_half()); + } + + #[test] + fn test_split_exact() { + let sizes = vec![5u64, 10, 15, 20]; + let intervals = Interval::new(1, 50).split_exact(sizes.as_slice()); + assert_eq!(intervals.len(), sizes.len()); + for i in 0..sizes.len() { + assert_eq!(intervals[i].size(), sizes[i]) + } + } + + #[test] + fn test_exponential_fractions() { + let mut exp_fractions = exponential_fractions(vec![2, 4, 8, 16].as_slice()); + // println!("{:?}", exp_fractions); + for i in 0..exp_fractions.len() - 1 { + assert!(exp_fractions[i + 1] > exp_fractions[i]); + } + + exp_fractions = exponential_fractions(vec![].as_slice()); + assert_eq!(exp_fractions.len(), 0); + + exp_fractions = exponential_fractions(vec![0, 0].as_slice()); + assert_eq!(exp_fractions.len(), 2); + assert_eq!(0.5f64, exp_fractions[0]); + assert_eq!(exp_fractions[0], exp_fractions[1]); + } + + #[test] + fn test_contains() { + assert!(Interval::new(1, 100).contains(Interval::new(1, 100))); + assert!(Interval::new(1, 100).contains(Interval::new(1, 99))); + assert!(Interval::new(1, 100).contains(Interval::new(2, 100))); + assert!(Interval::new(1, 100).contains(Interval::new(2, 99))); + assert!(!Interval::new(1, 100).contains(Interval::new(50, 150))); + assert!(!Interval::new(1, 100).contains(Interval::new(150, 160))); + } + + #[test] + fn test_split_exponential() { + struct Test { + interval: Interval, + sizes: Vec, + expected: Vec, + } + + let tests = [ + Test { + interval: Interval::new(1, 100), + sizes: vec![100u64], + expected: vec![Interval::new(1, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![50u64, 50], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![10u64, 20, 30, 40], + expected: vec![ + Interval::new(1, 10), + Interval::new(11, 30), + Interval::new(31, 60), + Interval::new(61, 
100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![25u64, 25], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![1u64, 1], + expected: vec![Interval::new(1, 50), Interval::new(51, 100)], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![33u64, 33, 33], + expected: vec![ + Interval::new(1, 33), + Interval::new(34, 66), + Interval::new(67, 100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![10u64, 15, 25], + expected: vec![ + Interval::new(1, 10), + Interval::new(11, 25), + Interval::new(26, 100), + ], + }, + Test { + interval: Interval::new(1, 100), + sizes: vec![25u64, 15, 10], + expected: vec![ + Interval::new(1, 75), + Interval::new(76, 90), + Interval::new(91, 100), + ], + }, + Test { + interval: Interval::new(1, 10_000), + sizes: vec![10u64, 10, 20], + expected: vec![ + Interval::new(1, 20), + Interval::new(21, 40), + Interval::new(41, 10_000), + ], + }, + Test { + interval: Interval::new(1, 100_000), + sizes: vec![31_000u64, 31_000, 30_001], + expected: vec![ + Interval::new(1, 35_000), + Interval::new(35_001, 69_999), + Interval::new(70_000, 100_000), + ], + }, + ]; + + for test in &tests { + assert_eq!( + test.expected, + test.interval.split_exponential(test.sizes.as_slice()) + ); + } + } +} diff --git a/flexidag/dag/src/types/mod.rs b/flexidag/dag/src/types/mod.rs new file mode 100644 index 0000000000..d3acae1c23 --- /dev/null +++ b/flexidag/dag/src/types/mod.rs @@ -0,0 +1,6 @@ +pub mod ghostdata; +pub mod interval; +pub mod ordering; +pub mod perf; +pub mod reachability; +pub mod trusted; diff --git a/flexidag/dag/src/types/ordering.rs b/flexidag/dag/src/types/ordering.rs new file mode 100644 index 0000000000..a1ed8c2561 --- /dev/null +++ b/flexidag/dag/src/types/ordering.rs @@ -0,0 +1,36 @@ +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlueWorkType; +use std::cmp::Ordering; + +#[derive(Eq, Clone, Debug, Serialize, Deserialize)] +pub struct SortableBlock { + pub hash: Hash, + pub blue_work: BlueWorkType, +} + +impl SortableBlock { + pub fn new(hash: Hash, blue_work: BlueWorkType) -> Self { + Self { hash, blue_work } + } +} + +impl PartialEq for SortableBlock { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} + +impl PartialOrd for SortableBlock { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SortableBlock { + fn cmp(&self, other: &Self) -> Ordering { + self.blue_work + .cmp(&other.blue_work) + .then_with(|| self.hash.cmp(&other.hash)) + } +} diff --git a/flexidag/dag/src/types/perf.rs b/flexidag/dag/src/types/perf.rs new file mode 100644 index 0000000000..6da44d4cd7 --- /dev/null +++ b/flexidag/dag/src/types/perf.rs @@ -0,0 +1,51 @@ +//! +//! A module for performance critical constants which depend on consensus parameters. +//! The constants in this module should all be revisited if mainnet consensus parameters change. +//! + +/// The default target depth for reachability reindexes. +pub const DEFAULT_REINDEX_DEPTH: u64 = 100; + +/// The default slack interval used by the reachability +/// algorithm to encounter for blocks out of the selected chain. 
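+/// (1 << 12 = 4096 units of slack per chain block, matching the 2^64/2^52
+/// capacity argument noted in `reindex.rs`.)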
+pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; + +#[derive(Clone, Debug)] +pub struct PerfParams { + // + // Cache sizes + // + /// Preferred cache size for header-related data + pub header_data_cache_size: u64, + + /// Preferred cache size for block-body-related data which + /// is typically orders-of magnitude larger than header data + /// (Note this cannot be set to high due to severe memory consumption) + pub block_data_cache_size: u64, + + /// Preferred cache size for UTXO-related data + pub utxo_set_cache_size: u64, + + /// Preferred cache size for block-window-related data + pub block_window_cache_size: u64, + + // + // Thread-pools + // + /// Defaults to 0 which indicates using system default + /// which is typically the number of logical CPU cores + pub block_processors_num_threads: usize, + + /// Defaults to 0 which indicates using system default + /// which is typically the number of logical CPU cores + pub virtual_processor_num_threads: usize, +} + +pub const PERF_PARAMS: PerfParams = PerfParams { + header_data_cache_size: 10_000, + block_data_cache_size: 200, + utxo_set_cache_size: 10_000, + block_window_cache_size: 2000, + block_processors_num_threads: 0, + virtual_processor_num_threads: 0, +}; diff --git a/flexidag/dag/src/types/reachability.rs b/flexidag/dag/src/types/reachability.rs new file mode 100644 index 0000000000..35dc3979b6 --- /dev/null +++ b/flexidag/dag/src/types/reachability.rs @@ -0,0 +1,26 @@ +use super::interval::Interval; +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlockHashes; +use std::sync::Arc; + +#[derive(Clone, Default, Debug, Serialize, Deserialize)] +pub struct ReachabilityData { + pub children: BlockHashes, + pub parent: Hash, + pub interval: Interval, + pub height: u64, + pub future_covering_set: BlockHashes, +} + +impl ReachabilityData { + pub fn new(parent: Hash, interval: Interval, height: u64) -> Self { + Self { + children: Arc::new(vec![]), + parent, + interval, + height, + future_covering_set: Arc::new(vec![]), + } + } +} diff --git a/flexidag/dag/src/types/trusted.rs b/flexidag/dag/src/types/trusted.rs new file mode 100644 index 0000000000..9a4cf37bbd --- /dev/null +++ b/flexidag/dag/src/types/trusted.rs @@ -0,0 +1,26 @@ +use serde::{Deserialize, Serialize}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlueWorkType, KType}; + +/// Represents semi-trusted externally provided Ghostdag data (by a network peer) +#[derive(Clone, Serialize, Deserialize)] +pub struct ExternalGhostdagData { + pub blue_score: u64, + pub blue_work: BlueWorkType, + pub selected_parent: Hash, + pub mergeset_blues: Vec, + pub mergeset_reds: Vec, + pub blues_anticone_sizes: BlockHashMap, +} + +/// Represents externally provided Ghostdag data associated with a block Hash +pub struct TrustedGhostdagData { + pub hash: Hash, + pub ghostdag: ExternalGhostdagData, +} + +impl TrustedGhostdagData { + pub fn new(hash: Hash, ghostdag: ExternalGhostdagData) -> Self { + Self { hash, ghostdag } + } +} diff --git a/flexidag/dag/tests/tests.rs b/flexidag/dag/tests/tests.rs new file mode 100644 index 0000000000..274092c6f6 --- /dev/null +++ b/flexidag/dag/tests/tests.rs @@ -0,0 +1,678 @@ +#[cfg(test)] +mod tests { + use anyhow::{bail, Ok}; + use starcoin_config::RocksdbConfig; + use starcoin_crypto::HashValue as Hash; + use starcoin_dag::{ + blockdag::BlockDAG, + consensusdb::{ + consenses_state::{DagState, DagStateReader, DagStateStore}, + prelude::{FlexiDagStorage, 
FlexiDagStorageConfig}, + schemadb::{DbReachabilityStore, ReachabilityStore, ReachabilityStoreReader}, + }, + reachability::{inquirer, ReachabilityError}, + types::{ghostdata, interval::Interval}, + }; + use starcoin_types::{ + block::{set_test_flexidag_fork_height, BlockHeader, BlockHeaderBuilder, BlockNumber}, + blockhash::KType, + }; + use std::{env, fs, vec}; + + fn build_block_dag(k: KType) -> BlockDAG { + let db_path = env::temp_dir().join("smolstc"); + if db_path + .as_path() + .try_exists() + .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) + { + fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); + } + let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); + let db = FlexiDagStorage::create_from_path(db_path, config) + .expect("Failed to create flexidag storage"); + BlockDAG::new(k, db) + } + + #[test] + fn test_dag_0() { + let mut dag = BlockDAG::create_for_testing().unwrap(); + let genesis = BlockHeader::dag_genesis_random() + .as_builder() + .with_difficulty(0.into()) + .build(); + + let mut parents_hash = vec![genesis.id()]; + dag.init_with_genesis(genesis.clone()).unwrap(); + + for _ in 0..10 { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parents_hash(Some(parents_hash.clone())) + .build(); + parents_hash = vec![header.id()]; + dag.commit(header.to_owned(), genesis.parent_hash()) + .unwrap(); + let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + println!("{:?},{:?}", header, ghostdata); + } + } + + #[test] + fn test_dag_1() { + let genesis = BlockHeader::dag_genesis_random() + .as_builder() + .with_difficulty(0.into()) + .build(); + let block1 = BlockHeaderBuilder::random() + .with_difficulty(1.into()) + .with_parents_hash(Some(vec![genesis.id()])) + .build(); + let block2 = BlockHeaderBuilder::random() + .with_difficulty(2.into()) + .with_parents_hash(Some(vec![genesis.id()])) + .build(); + let block3_1 = BlockHeaderBuilder::random() + .with_difficulty(1.into()) + .with_parents_hash(Some(vec![genesis.id()])) + .build(); + let block3 = BlockHeaderBuilder::random() + .with_difficulty(3.into()) + .with_parents_hash(Some(vec![block3_1.id()])) + .build(); + let block4 = BlockHeaderBuilder::random() + .with_difficulty(4.into()) + .with_parents_hash(Some(vec![block1.id(), block2.id()])) + .build(); + let block5 = BlockHeaderBuilder::random() + .with_difficulty(4.into()) + .with_parents_hash(Some(vec![block2.id(), block3.id()])) + .build(); + let block6 = BlockHeaderBuilder::random() + .with_difficulty(5.into()) + .with_parents_hash(Some(vec![block4.id(), block5.id()])) + .build(); + let mut latest_id = block6.id(); + let genesis_id = genesis.id(); + let mut dag = build_block_dag(3); + let expect_selected_parented = vec![block5.id(), block3.id(), block3_1.id(), genesis_id]; + dag.init_with_genesis(genesis.clone()).unwrap(); + + dag.commit(block1, genesis.parent_hash()).unwrap(); + dag.commit(block2, genesis.parent_hash()).unwrap(); + dag.commit(block3_1, genesis.parent_hash()).unwrap(); + dag.commit(block3, genesis.parent_hash()).unwrap(); + dag.commit(block4, genesis.parent_hash()).unwrap(); + dag.commit(block5, genesis.parent_hash()).unwrap(); + dag.commit(block6, genesis.parent_hash()).unwrap(); + let mut count = 0; + while latest_id != genesis_id && count < 4 { + let ghostdata = dag.ghostdata_by_hash(latest_id).unwrap().unwrap(); + latest_id = ghostdata.selected_parent; + assert_eq!(expect_selected_parented[count], latest_id); + count += 1; 
+ } + } + + #[tokio::test] + async fn test_with_spawn() { + use starcoin_types::block::{BlockHeader, BlockHeaderBuilder}; + let genesis = BlockHeader::dag_genesis_random() + .as_builder() + .with_difficulty(0.into()) + .build(); + let block1 = BlockHeaderBuilder::random() + .with_difficulty(1.into()) + .with_parents_hash(Some(vec![genesis.id()])) + .build(); + let block2 = BlockHeaderBuilder::random() + .with_difficulty(2.into()) + .with_parents_hash(Some(vec![genesis.id()])) + .build(); + let mut dag = BlockDAG::create_for_testing().unwrap(); + dag.init_with_genesis(genesis.clone()).unwrap(); + dag.commit(block1.clone(), genesis.parent_hash()).unwrap(); + dag.commit(block2.clone(), genesis.parent_hash()).unwrap(); + let block3 = BlockHeaderBuilder::random() + .with_difficulty(3.into()) + .with_parents_hash(Some(vec![block1.id(), block2.id()])) + .build(); + let mut handles = vec![]; + for _i in 1..100 { + let mut dag_clone = dag.clone(); + let block_clone = block3.clone(); + let origin = genesis.parent_hash(); + let handle = tokio::task::spawn_blocking(move || { + let _ = dag_clone.commit(block_clone, origin); + }); + handles.push(handle); + } + for handle in handles { + handle.await.unwrap(); + } + let mut child = dag.get_children(block1.id()).unwrap(); + assert_eq!(child.pop().unwrap(), block3.id()); + assert_eq!(child.len(), 0); + } + + #[test] + fn test_dag_genesis_fork() { + // initialzie the dag firstly + let mut dag = build_block_dag(3); + + let genesis = BlockHeader::dag_genesis_random() + .as_builder() + .with_difficulty(0.into()) + .build(); + dag.init_with_genesis(genesis.clone()).unwrap(); + + // normally add the dag blocks + let mut parents_hash = vec![genesis.id()]; + for _ in 0..10 { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parents_hash(Some(parents_hash.clone())) + .build(); + parents_hash = vec![header.id()]; + dag.commit(header.to_owned(), genesis.parent_hash()) + .unwrap(); + let _ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + } + + // fork, produce a new dag gensis + let new_genesis = BlockHeader::dag_genesis_random() + .as_builder() + .with_difficulty(0.into()) + .build(); + dag.init_with_genesis(new_genesis.clone()).unwrap(); + + // record the old dag chain + let mut old_parents_hash = parents_hash.clone(); + // the new dag chain + parents_hash = vec![new_genesis.id()]; + + // add dag blocks in the old dag chain + for _ in 0..10 { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parents_hash(Some(old_parents_hash.clone())) + .build(); + old_parents_hash = vec![header.id()]; + dag.commit(header.to_owned(), genesis.parent_hash()) + .unwrap(); + let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + println!("add a old header: {:?}, tips: {:?}", header, ghostdata); + } + + // add dag blocks in the new dag chain + for _ in 0..10 { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parents_hash(Some(parents_hash.clone())) + .build(); + parents_hash = vec![header.id()]; + dag.commit(header.to_owned(), genesis.parent_hash()) + .unwrap(); + let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + println!("add a forked header: {:?}, tips: {:?}", header, ghostdata); + } + + let header_builder = BlockHeaderBuilder::random(); + parents_hash.append(&mut old_parents_hash); + let header = header_builder.with_parents_hash(Some(parents_hash)).build(); + // parents_hash = vec![header.id()]; + 
dag.commit(header.to_owned(), genesis.parent_hash()) + .unwrap(); + let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + println!("add a forked header: {:?}, tips: {:?}", header, ghostdata); + } + + #[test] + fn test_dag_tips_store() { + let dag = BlockDAG::create_for_testing().unwrap(); + + let state1 = DagState { + tips: vec![Hash::random()], + }; + let dag_genesis1 = Hash::random(); + dag.storage + .state_store + .insert(dag_genesis1, state1.clone()) + .expect("failed to store the dag state"); + + let state2 = DagState { + tips: vec![Hash::random()], + }; + let dag_genesis2 = Hash::random(); + dag.storage + .state_store + .insert(dag_genesis2, state2.clone()) + .expect("failed to store the dag state"); + + assert_eq!( + dag.storage + .state_store + .get_state(dag_genesis1) + .expect("failed to get the dag state"), + state1 + ); + assert_eq!( + dag.storage + .state_store + .get_state(dag_genesis2) + .expect("failed to get the dag state"), + state2 + ); + } + + // #[test] + // fn test_dag_multiple_commits() { + // // initialize the dag first + // let dag = BlockDAG::create_for_testing().unwrap(); + + // let genesis = BlockHeader::dag_genesis_random() + // .as_builder() + // .with_difficulty(0.into()) + // .build(); + // dag.init_with_genesis(genesis.clone()).unwrap(); + + // // normally add the dag blocks + // let mut headers = vec![]; + // let mut parents_hash = vec![genesis.id()]; + // let mut parent_hash = genesis.id(); + // for _ in 0..100 { + // let header_builder = BlockHeaderBuilder::random(); + // let header = header_builder + // .with_parent_hash(parent_hash) + // .with_parents_hash(Some(parents_hash.clone())) + // .build(); + // parents_hash = vec![header.id()]; + // parent_hash = header.id(); + // headers.push(header.clone()); + // dag.commit(header.to_owned()).unwrap(); + // let ghostdata = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + // } + + // for _ in 0..10 { + // for header in &headers { + // let _ = dag.commit(header.clone()); + // let _ = dag.ghostdata_by_hash(header.id()).unwrap().unwrap(); + // } + // } + // } + + #[test] + fn test_dag_multiple_commits() -> anyhow::Result<()> { + set_test_flexidag_fork_height(1); + // initialize the dag first + let mut dag = BlockDAG::create_for_testing().unwrap(); + + let origin = BlockHeaderBuilder::random().with_number(0).build(); + let genesis = BlockHeader::dag_genesis_random_with_parent(origin); + + dag.init_with_genesis(genesis.clone()).unwrap(); + + // normally add the dag blocks + let mut parents_hash = vec![genesis.id()]; + let mut parent_hash = genesis.id(); + for i in 2..100 { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parent_hash(parent_hash) + .with_parents_hash(Some(parents_hash.clone())) + .with_number(i) + .build(); + parents_hash = vec![header.id()]; + parent_hash = header.id(); + dag.commit(header.to_owned(), genesis.parent_hash())?; + if header.number() == 6 { + println!("commit again: {:?}", header); + dag.commit(header.to_owned(), genesis.parent_hash())?; + println!("and again: {:?}", header); + dag.commit(header.to_owned(), genesis.parent_hash())?; + } + let ghostdata = dag.ghostdata(&parents_hash).unwrap(); + println!("add a header: {:?}, tips: {:?}", header, ghostdata); + } + + Ok(()) + } + + #[test] + fn test_reachability_abort_add_block() -> anyhow::Result<()> { + let dag = BlockDAG::create_for_testing().unwrap(); + let mut reachability_store = dag.storage.reachability_store; + + let mut parent = Hash::random(); + let origin =
parent; + let mut child = Hash::random(); + inquirer::init(&mut reachability_store, parent)?; + inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + )?; + + for i in 0..70 { + parent = child; + child = Hash::random(); + + inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + )?; + if (61..=69).contains(&i) { + for _ in 0..10 { + inquirer::init(&mut reachability_store, origin)?; + let result = inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + ); + match result { + Result::Ok(_) => (), + Err(ReachabilityError::DataInconsistency) => { + let future_covering_set = + reachability_store.get_future_covering_set(child)?; + println!("future_covering_set = {:?}", future_covering_set); + } + Err(e) => { + println!( + "failed to add a block in reachability store, error = {:?}", + e + ); + bail!("{:?}", e); + } + } + } + } + } + + Ok(()) + } + + #[test] + fn test_reachability_check_ancestor() -> anyhow::Result<()> { + let dag = BlockDAG::create_for_testing().unwrap(); + let mut reachability_store = dag.storage.reachability_store.clone(); + + let mut parent = Hash::random(); + let origin = parent; + let mut child = Hash::random(); + inquirer::init(&mut reachability_store, parent)?; + inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + )?; + + let mut target = child; + let mut target_parent = parent; + for i in 0..70 { + parent = child; + child = Hash::random(); + + if i == 47 { + inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + )?; + + target = child; + target_parent = parent; + } else { + inquirer::add_block( + &mut reachability_store, + child, + parent, + &mut vec![parent].into_iter(), + )?; + } + } + + // the relationship + // origin.....target_parent-target.....parent-child + // ancestor + assert!( + dag.check_ancestor_of(target, vec![parent, child])?, + "failed to check target is the ancestor of its descendant" + ); + assert!( + dag.check_ancestor_of(origin, vec![target, parent, child])?, + "failed to check origin is the parent of its child" + ); + assert!( + dag.check_ancestor_of(parent, vec![child])?, + "failed to check target, parent is the parent of its child" + ); + assert!( + dag.check_ancestor_of(target_parent, vec![target])?, + "failed to check target parent, parent is the parent of its child" + ); + + // not ancestor + assert!( + !dag.check_ancestor_of(child, vec![target])?, + "failed to check child is not the ancestor of its descendant" + ); + assert!( + !dag.check_ancestor_of(parent, vec![target])?, + "failed to check child is not the ancestor of its descendant" + ); + assert!( + !dag.check_ancestor_of(child, vec![parent])?, + "failed to check target, child is the child of its parent" + ); + assert!( + !dag.check_ancestor_of(target, vec![target_parent])?, + "failed to check target is the child of its parent" + ); + + assert!( + dag.check_ancestor_of(target, vec![Hash::random(), Hash::random(),]) + .is_err(), + "failed to check not the ancestor of descendants" + ); + assert!( + dag.check_ancestor_of(Hash::random(), vec![target, parent, child]) + .is_err(), + "failed to check not the descendant of parents" + ); + + Ok(()) + } + + fn print_reachability_data(reachability: &DbReachabilityStore, key: &[Hash]) { + println!("**********************"); + for k in key { + let height = reachability.get_height(*k).unwrap(); + let parent = 
reachability.get_parent(*k).unwrap(); + let children = reachability.get_children(*k).unwrap(); + let interval = reachability.get_interval(*k).unwrap(); + let future_cover_hashes = reachability.get_future_covering_set(*k).unwrap(); + + println!("key: {:?}, height: {:?}, interval: {:?}, parent: {:?}, children: {:?}, future_cover_hashes: {:?}", k, height, interval, parent, children, future_cover_hashes); + } + println!("**********************"); + } + + #[test] + fn test_reachability_algorithm() -> anyhow::Result<()> { + let dag = BlockDAG::create_for_testing().unwrap(); + let mut reachability_store = dag.storage.reachability_store.clone(); + + let origin = Hash::random(); + + inquirer::init_for_test(&mut reachability_store, origin, Interval::new(1, 32))?; + + let mut hashes = vec![origin]; + print_reachability_data(&reachability_store, &hashes); + + let child1 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child1, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child1); + print_reachability_data(&reachability_store, &hashes); + + let child2 = Hash::random(); + hashes.push(child2); + inquirer::add_block( + &mut reachability_store, + child2, + origin, + &mut vec![origin].into_iter(), + )?; + print_reachability_data(&reachability_store, &hashes); + + let child3 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child3, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child3); + print_reachability_data(&reachability_store, &hashes); + + let child4 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child4, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child4); + print_reachability_data(&reachability_store, &hashes); + + let child5 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child5, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child5); + print_reachability_data(&reachability_store, &hashes); + + let child6 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child6, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child6); + print_reachability_data(&reachability_store, &hashes); + + let child7 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child7, + origin, + &mut vec![origin].into_iter(), + )?; + hashes.push(child7); + print_reachability_data(&reachability_store, &hashes); + + + let child8 = Hash::random(); + inquirer::add_block( + &mut reachability_store, + child8, + child1, + &mut vec![child1].into_iter(), + )?; + hashes.push(child8); + print_reachability_data(&reachability_store, &hashes); + + // for _i in 7..=31 { + // let s = Hash::random(); + // inquirer::add_block( + // &mut reachability_store, + // s, + // child1, + // &mut vec![child1].into_iter(), + // )?; + // hashes.push(s); + // print_reachability_data(&reachability_store, &hashes); + // } + + assert!( + dag.check_ancestor_of(origin, vec![child5])?, + "child 5 must be origin's child" + ); + + // let mut count = 6; + // loop { + // let child = Hash::random(); + // inquirer::add_block(&mut reachability_store, child, origin, &mut vec![origin].into_iter())?; + // hashes.push(child); + // print!("{count:?}"); + // print_reachability_data(&reachability_store, &hashes); + // count += 1; + // } + + Ok(()) + } + + fn add_and_print(number: BlockNumber, parent: Hash, parents: Vec<Hash>, origin: Hash, dag: &mut BlockDAG) -> anyhow::Result<Hash> { + let header_builder = BlockHeaderBuilder::random(); + let header = header_builder + .with_parent_hash(parent)
+ .with_parents_hash(Some(parents)) + .with_number(number) + .build(); + dag.commit(header.to_owned(), origin)?; + let ghostdata = dag.ghostdata(&[header.id()])?; + println!("add a header: {:?}, blue set: {:?}, red set: {:?}, blue anticone size: {:?}", header, ghostdata.mergeset_blues, ghostdata.mergeset_reds, ghostdata.blues_anticone_sizes); + Ok(header.id()) + } + + #[test] + fn test_dag_mergeset() -> anyhow::Result<()> { + set_test_flexidag_fork_height(1); + // initialize the dag first + let mut dag = BlockDAG::create_for_testing().unwrap(); + + let origin = BlockHeaderBuilder::random().with_number(0).build(); + let genesis = BlockHeader::dag_genesis_random_with_parent(origin); + + dag.init_with_genesis(genesis.clone()).unwrap(); + + println!("add a genesis: {:?}", genesis.id()); + + // normally add the dag blocks + let mut parents_hash = vec![genesis.id()]; + let mut parent_hash = genesis.id(); + + let mut header = add_and_print(2, parent_hash, parents_hash, genesis.parent_hash(), &mut dag)?; + let red = add_and_print(3, header, vec![header], genesis.parent_hash(), &mut dag)?; + + parents_hash = vec![genesis.id()]; + parent_hash = genesis.id(); + + header = add_and_print(2, parent_hash, parents_hash, genesis.parent_hash(), &mut dag)?; + header = add_and_print(3, header, vec![header], genesis.parent_hash(), &mut dag)?; + header = add_and_print(4, header, vec![header], genesis.parent_hash(), &mut dag)?; + let blue = header; + + + header = add_and_print(5, blue, vec![blue, red], genesis.parent_hash(), &mut dag)?; + + let ghostdata = dag.ghostdata(&[header, red])?; + println!("add a header: {:?}, blue set: {:?}, red set: {:?}, blue anticone size: {:?}", header, ghostdata.mergeset_blues, ghostdata.mergeset_reds, ghostdata.blues_anticone_sizes); + + Ok(()) + } +} diff --git a/flexidag/src/lib.rs b/flexidag/src/lib.rs new file mode 100644 index 0000000000..319bf240fb --- /dev/null +++ b/flexidag/src/lib.rs @@ -0,0 +1,38 @@ +use std::path::Path; + +use starcoin_config::{ChainNetworkID, RocksdbConfig}; +use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; + +// pub fn try_init_with_storage( +// storage: Arc<Storage>, +// config: Arc<NodeConfig>, +// ) -> anyhow::Result<BlockDAG> { +// let dag = new_by_config( +// config.data_dir().join("flexidag").as_path(), +// config.net().id().clone(), +// )?; +// let startup_info = storage +// .get_startup_info()? +// .expect("startup info must exist"); + +// let block_header = storage +// .get_block_header_by_hash(*startup_info.get_main())?
+// .expect("the genesis block in dag accumulator must not be none"); +// let fork_height = block_header.dag_fork_height(); +// match block_header.number().cmp(&fork_height) { +// std::cmp::Ordering::Greater | std::cmp::Ordering::Less => Ok(dag), +// std::cmp::Ordering::Equal => { +// // dag.commit(block_header)?; +// dag.init_with_genesis(block_header)?; +// Ok(dag) +// } +// } +// } + +pub fn new_by_config(db_path: &Path, _net: ChainNetworkID) -> anyhow::Result<BlockDAG> { + let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); + let db = FlexiDagStorage::create_from_path(db_path, config)?; + let dag = BlockDAG::new(8, db); + Ok(dag) +} diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index 685966588a..541bb76807 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -19,10 +19,11 @@ starcoin-transaction-builder = { workspace = true } starcoin-types = { features = ["fuzzing"], workspace = true } starcoin-vm-types = { workspace = true } starcoin-vm-runtime = { workspace = true } +starcoin-dag = { workspace = true } stdlib = { workspace = true } stest = { workspace = true } thiserror = { workspace = true } - +tempfile = { workspace = true } [features] default = [] fuzzing = ["starcoin-types/fuzzing"] diff --git a/genesis/generated/halley/genesis b/genesis/generated/halley/genesis index 0b31f956ca..877e984305 100644 Binary files a/genesis/generated/halley/genesis and b/genesis/generated/halley/genesis differ diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index f16dc6b0ed..8d58f19916 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -1,7 +1,10 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +mod errors; + use anyhow::{bail, ensure, format_err, Result}; +pub use errors::GenesisError; use include_dir::include_dir; use include_dir::Dir; use serde::{Deserialize, Serialize}; @@ -12,18 +15,25 @@ use starcoin_chain::{BlockChain, ChainReader}; use starcoin_config::{ genesis_key_pair, BuiltinNetworkID, ChainNetwork, ChainNetworkID, GenesisBlockParameter, }; +use starcoin_dag::block_dag_config::BlockDAGConfigMock; +use starcoin_dag::blockdag::BlockDAG; use starcoin_logger::prelude::*; use starcoin_state_api::ChainStateWriter; use starcoin_statedb::ChainStateDB; use starcoin_storage::storage::StorageInstance; +use starcoin_storage::table_info::TableInfoStore; use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_transaction_builder::build_stdlib_package_with_modules; use starcoin_transaction_builder::{build_stdlib_package, StdLibOptions}; +use starcoin_types::block::BlockNumber; +use starcoin_types::block::LegacyBlock; use starcoin_types::startup_info::{ChainInfo, StartupInfo}; use starcoin_types::transaction::Package; use starcoin_types::transaction::TransactionInfo; use starcoin_types::{block::Block, transaction::Transaction}; use starcoin_vm_types::account_config::CORE_CODE_ADDRESS; +use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; +use starcoin_vm_types::state_view::StateView; use starcoin_vm_types::transaction::{ RawUserTransaction, SignedUserTransaction, TransactionPayload, }; @@ -35,12 +45,6 @@ use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use std::sync::Arc; -mod errors; -pub use errors::GenesisError; -use starcoin_storage::table_info::TableInfoStore; -use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; -use starcoin_vm_types::state_view::StateView; - pub static G_GENESIS_GENERATED_DIR: &str = "generated"; pub const GENESIS_DIR: Dir =
include_dir!("generated"); @@ -49,6 +53,25 @@ pub struct Genesis { block: Block, } +#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename(deserialize = "Genesis"))] +pub struct LegacyGenesis { + pub block: LegacyBlock, +} +impl From<LegacyGenesis> for Genesis { + fn from(value: LegacyGenesis) -> Self { + Self { + block: value.block.into(), + } + } +} +impl From<Genesis> for LegacyGenesis { + fn from(value: Genesis) -> Self { + Self { + block: value.block.into(), + } + } +} impl Display for Genesis { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Genesis {{")?; @@ -97,6 +120,7 @@ impl Genesis { pub fn build(net: &ChainNetwork) -> Result<Self> { debug!("Init genesis for {}", net); let block = Self::build_genesis_block(net)?; + assert_eq!(block.header().number(), 0); debug!("Genesis block id : {:?}", block.header().id()); let genesis = Self { block }; @@ -232,7 +256,7 @@ impl Genesis { let mut genesis_file = File::open(genesis_file_path)?; let mut content = vec![]; genesis_file.read_to_end(&mut content)?; - let genesis = bcs_ext::from_bytes(&content)?; + let genesis = bcs_ext::from_bytes::<LegacyGenesis>(&content)?.into(); Ok(Some(genesis)) } @@ -245,7 +269,7 @@ impl Genesis { pub fn load_generated(net: BuiltinNetworkID) -> Result<Option<Self>> { match Self::genesis_bytes(net) { - Some(bytes) => Ok(Some(bcs_ext::from_bytes::<Genesis>(bytes)?)), + Some(bytes) => Ok(Some(bcs_ext::from_bytes::<LegacyGenesis>(bytes)?.into())), None => Ok(None), } } @@ -254,6 +278,7 @@ impl Genesis { &self, net: &ChainNetwork, storage: Arc<Storage>, + dag: BlockDAG, ) -> Result<ChainInfo> { storage.save_genesis(self.block.id())?; let genesis_chain = BlockChain::new_with_genesis( @@ -261,6 +286,7 @@ impl Genesis { storage.clone(), net.genesis_epoch(), self.block.clone(), + dag, )?; let startup_info = StartupInfo::new(genesis_chain.current_header().id()); storage.save_startup_info(startup_info)?; @@ -279,7 +305,7 @@ impl Genesis { } let genesis_file = data_dir.join(Self::GENESIS_FILE_NAME); let mut file = File::create(genesis_file)?; - let contents = bcs_ext::to_bytes(self)?; + let contents = bcs_ext::to_bytes::<LegacyGenesis>(&LegacyGenesis::from(self.to_owned()))?; file.write_all(&contents)?; Ok(()) } @@ -315,6 +341,7 @@ impl Genesis { pub fn init_and_check_storage( net: &ChainNetwork, storage: Arc<Storage>, + dag: BlockDAG, data_dir: &Path, ) -> Result<(ChainInfo, Genesis)> { debug!("load startup_info."); @@ -344,7 +371,7 @@ impl Genesis { } Ok(None) => { let genesis = Self::load_and_check_genesis(net, data_dir, true)?; - let chain_info = genesis.execute_genesis_block(net, storage.clone())?; + let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag)?; (chain_info, genesis) } Err(e) => return Err(GenesisError::GenesisLoadFailure(e).into()), @@ -353,12 +380,16 @@ impl Genesis { Ok((chain_info, genesis)) } - pub fn init_storage_for_test(net: &ChainNetwork) -> Result<(Arc<Storage>, ChainInfo, Genesis)> { - debug!("init storage by genesis for test."); + pub fn init_storage_for_test( + net: &ChainNetwork, + fork_number: BlockNumber, + ) -> Result<(Arc<Storage>, ChainInfo, Genesis, BlockDAG)> { + debug!("init storage by genesis for test.
{net:?}"); let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); let genesis = Genesis::load_or_build(net)?; - let chain_info = genesis.execute_genesis_block(net, storage.clone())?; - Ok((storage, chain_info, genesis)) + let dag = BlockDAG::create_for_testing_mock(BlockDAGConfigMock { fork_number })?; + let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag.clone())?; + Ok((storage, chain_info, genesis, dag)) } } @@ -428,12 +459,20 @@ mod tests { pub fn do_test_genesis(net: &ChainNetwork, data_dir: &Path) -> Result<()> { let storage1 = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); - let (chain_info1, genesis1) = - Genesis::init_and_check_storage(net, storage1.clone(), data_dir)?; + let (chain_info1, genesis1) = Genesis::init_and_check_storage( + net, + storage1.clone(), + BlockDAG::create_for_testing()?, + data_dir, + )?; let storage2 = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); - let (chain_info2, genesis2) = - Genesis::init_and_check_storage(net, storage2.clone(), data_dir)?; + let (chain_info2, genesis2) = Genesis::init_and_check_storage( + net, + storage2.clone(), + BlockDAG::create_for_testing()?, + data_dir, + )?; assert_eq!(genesis1, genesis2, "genesis execute chain info different."); diff --git a/kube/manifest/starcoin-proxima.yaml b/kube/manifest/starcoin-proxima.yaml index 491b5e01f2..4777cdc2e1 100644 --- a/kube/manifest/starcoin-proxima.yaml +++ b/kube/manifest/starcoin-proxima.yaml @@ -11,7 +11,7 @@ spec: matchLabels: app: starcoin serviceName: starcoin-svc - replicas: 1 + replicas: 2 template: metadata: name: starcoin @@ -23,19 +23,19 @@ spec: starcoin/node-pool: seed-pool containers: - name: starcoin - image: ghcr.io/starcoin/starcoin:v1.13.7 + image: ghcr.io/starcoinorg/starcoin:dag-mining-net imagePullPolicy: Always command: - bash - -c args: - - rm -rf /sc-data/proxima/starcoin.ipc /sc-data/proxima/starcoindb/db/starcoindb/LOCK; + - rm -rf /sc-data/proxima/ /sc-data/proxima/starcoindb/db/starcoindb/LOCK; id=$(echo -e $POD_NAME|awk -F'-' '{print $2}') && IFS='; ' read -r -a node_keys <<< $NODE_KEYS && node_key=${node_keys[$id]}; if [ ! 
-z $node_key ]; then node_key_flag="--node-key ${node_key}"; fi; - /starcoin/starcoin -n proxima -d /sc-data --discover-local=true $node_key_flag; + /starcoin/starcoin -n proxima -d /sc-data --p2prpc-default-global-api-quota 9000/s --p2prpc-custom-user-api-quota get_header_by_hash=9000/s --p2prpc-custom-user-api-quota get_headers_by_hash=9000/s --p2prpc-custom-user-api-quota info=9000/s --p2prpc-custom-user-api-quota get_block_by_hash=9000/s --p2prpc-custom-user-api-quota get_block_ids=9000/s --p2prpc-custom-user-api-quota get_blocks_v1=9000/s --p2prpc-custom-user-api-quota get_blocks=9000/s --jsonrpc-default-global-api-quota 9000/s --jsonrpc-custom-user-api-quota chain.get_headers_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_header_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.info=9000/s --jsonrpc-custom-user-api-quota chain.get_block_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_block_ids=9000/s --jsonrpc-custom-user-api-quota chain.get_blocks_v1=9000/s --jsonrpc-custom-user-api-quota chain.get_blocks=9000/s --min-peers-to-propagate 512 --max-peers-to-propagate 1024 --max-outgoing-peers 512 --max-incoming-peers 512 --discover-local=true $node_key_flag; ret=$?; echo "Now ret is - $ret"; if [ $ret -eq 120 ] || [ $ret -eq 139 ]; then diff --git a/kube/manifest/starcoin-proxima2.yaml b/kube/manifest/starcoin-proxima2.yaml new file mode 100644 index 0000000000..f40fb07454 --- /dev/null +++ b/kube/manifest/starcoin-proxima2.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: starcoin + namespace: starcoin-proxima + labels: + app: starcoin + network: proxima +spec: + selector: + matchLabels: + app: starcoin + serviceName: starcoin-svc + replicas: 2 + template: + metadata: + name: starcoin + labels: + app: starcoin + network: proxima + spec: + nodeSelector: + starcoin/node-pool: seed-pool + containers: + - name: starcoin + image: ghcr.io/starcoinorg/starcoin:dag-mining-net + imagePullPolicy: Always + command: + - bash + - -c + args: + - rm -rf /sc-data/proxima/ /sc-data/proxima/starcoindb/db/starcoindb/LOCK; + id=$(echo -e $POD_NAME|awk -F'-' '{print $2}') && IFS='; ' read -r -a node_keys <<< $NODE_KEYS && + node_key=${node_keys[$id]}; + if [ ! 
-z $node_key ]; then + node_key_flag="--node-key ${node_key}"; + fi; + /starcoin/starcoin -n proxima -d /sc-data --p2prpc-default-global-api-quota 9000/s --p2prpc-custom-user-api-quota get_header_by_hash=9000/s --p2prpc-custom-user-api-quota get_headers_by_hash=9000/s --p2prpc-custom-user-api-quota info=9000/s --p2prpc-custom-user-api-quota get_block_by_hash=9000/s --p2prpc-custom-user-api-quota get_block_ids=9000/s --p2prpc-custom-user-api-quota get_blocks_v1=9000/s --p2prpc-custom-user-api-quota get_blocks=9000/s --jsonrpc-default-global-api-quota 9000/s --jsonrpc-custom-user-api-quota chain.get_headers_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_header_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.info=9000/s --jsonrpc-custom-user-api-quota chain.get_block_by_hash=9000/s --jsonrpc-custom-user-api-quota chain.get_block_ids=9000/s --jsonrpc-custom-user-api-quota chain.get_blocks_v1=9000/s --jsonrpc-custom-user-api-quota chain.get_blocks=9000/s --min-peers-to-propagate 512 --max-peers-to-propagate 1024 --max-outgoing-peers 512 --max-incoming-peers 512 --discover-local=true $node_key_flag; + ret=$?; + echo "Now ret is - $ret"; + if [ $ret -eq 120 ] || [ $ret -eq 139 ]; then + echo "Start failed with genesis mismatch code 120, please check or remove proxima data..."; + elif [ $ret -ne 0 ]; then + echo "Node start failed, try to remove config."; + rm /sc-data/proxima/config.toml; + rm /sc-data/proxima/genesis_config.json; + fi; + ports: + - containerPort: 9840 + hostPort: 9840 + volumeMounts: + - name: starcoin-volume + mountPath: /sc-data + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NODE_KEYS + valueFrom: + secretKeyRef: + name: node-keys + key: node-keys + volumeClaimTemplates: + - metadata: + name: starcoin-volume + namespace: starcoin-proxima + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 50Gi diff --git a/miner/Cargo.toml b/miner/Cargo.toml index d8a904bc46..20adabff05 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -27,7 +27,7 @@ starcoin-txpool-api = { workspace = true } starcoin-vm-types = { workspace = true } tokio = { features = ["full"], workspace = true } starcoin-types = { package = "starcoin-types", workspace = true } - +starcoin-dag = { workspace = true } [dev-dependencies] starcoin-network-rpc = { package = "starcoin-network-rpc", workspace = true } starcoin-genesis = { workspace = true } diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 5e6ba1ae50..41b07d80b7 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::create_block_template::metrics::BlockBuilderMetrics; -use anyhow::{format_err, Result}; +use anyhow::{anyhow, format_err, Result}; use futures::executor::block_on; use starcoin_account_api::{AccountAsyncService, AccountInfo, DefaultAccountChangeEvent}; use starcoin_account_service::AccountService; @@ -12,6 +12,7 @@ use starcoin_config::ChainNetwork; use starcoin_config::NodeConfig; use starcoin_consensus::Consensus; use starcoin_crypto::hash::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; @@ -79,6 +80,8 @@ impl ServiceFactory<Self> for BlockBuilderService { .and_then(|registry| BlockBuilderMetrics::register(registry).ok()); let vm_metrics = ctx.get_shared_opt::<VMMetrics>()?; + let dag = ctx.get_shared::<BlockDAG>()?; + let inner =
Inner::new( config.net(), storage, @@ -88,6 +91,7 @@ miner_account, metrics, vm_metrics, + dag, )?; Ok(Self { inner }) } } @@ -111,7 +115,7 @@ impl ActorService for BlockBuilderService { impl EventHandler<Self, NewHeadBlock> for BlockBuilderService { fn handle_event(&mut self, msg: NewHeadBlock, _ctx: &mut ServiceContext<BlockBuilderService>) { - if let Err(e) = self.inner.update_chain(msg.0.as_ref().clone()) { + if let Err(e) = self.inner.update_chain(msg.executed_block.as_ref().clone()) { error!("err : {:?}", e) } } @@ -191,6 +195,7 @@ pub struct Inner<P> { miner_account: AccountInfo, metrics: Option<BlockBuilderMetrics>, vm_metrics: Option<VMMetrics>, + dag: BlockDAG, } impl<P> Inner<P> @@ -206,12 +211,14 @@ where miner_account: AccountInfo, metrics: Option<BlockBuilderMetrics>, vm_metrics: Option<VMMetrics>, + dag: BlockDAG, ) -> Result<Self> { let chain = BlockChain::new( net.time_service(), block_id, storage.clone(), vm_metrics.clone(), + dag.clone(), )?; Ok(Inner { @@ -224,6 +231,7 @@ where miner_account, metrics, vm_metrics, + dag, }) } @@ -251,6 +259,7 @@ where block.header().id(), self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?; //current block possible be uncle. self.uncles.insert(current_id, current_header); @@ -309,10 +318,12 @@ where let max_txns = (block_gas_limit / 200) * 2; let txns = self.tx_provider.get_txns(max_txns); - let author = *self.miner_account.address(); let previous_header = self.chain.current_header(); - let uncles = self.find_uncles(); + let current_number = previous_header.number().saturating_add(1); + let epoch = self.chain.epoch(); + let strategy = epoch.strategy(); + let mut now_millis = self.chain.time_service().now_millis(); if now_millis <= previous_header.timestamp() { info!( @@ -321,6 +332,56 @@ where ); now_millis = previous_header.timestamp() + 1; } + let difficulty = strategy.calculate_next_difficulty(&self.chain)?; + let tips_hash = if current_number > self.chain.dag_fork_height()? { + let (_dag_genesis, tips_hash) = self + .chain + .current_tips_hash(&previous_header)? + .ok_or_else(|| { + anyhow!( + "the number of the block is larger than the dag fork number but no dag state!" + ) + })?; + Some(tips_hash) + } else { + None + }; + info!( + "block:{} tips(dag state):{:?}", + self.chain.current_header().number(), + &tips_hash, + ); + let (uncles, blue_blocks) = { + match &tips_hash { + None => (self.find_uncles(), None), + Some(tips) => { + let mut blues = self.dag.ghostdata(tips)?.mergeset_blues.to_vec(); + info!( + "create block template with tips:{:?},ghostdata blues:{:?}", + &tips_hash, blues + ); + let mut blue_blocks = vec![]; + + let __selected_parent = blues.remove(0); + for blue in &blues { + // todo: make sure blue block has been executed successfully + let block = self + .storage + .get_block_by_hash(blue.to_owned())?
+ .expect("Block should exist"); + blue_blocks.push(block); + } + ( + blue_blocks + .as_slice() + .iter() + .map(|b| b.header.clone()) + .collect(), + Some(blue_blocks), + ) + } + } + }; info!( "[CreateBlockTemplate] previous_header: {:?}, block_gas_limit: {}, max_txns: {}, txn len: {}, uncles len: {}, timestamp: {}", previous_header, @@ -331,10 +392,6 @@ where now_millis, ); - let epoch = self.chain.epoch(); - let strategy = epoch.strategy(); - let difficulty = strategy.calculate_next_difficulty(&self.chain)?; - let mut opened_block = OpenedBlock::new( self.storage.clone(), previous_header.clone(), @@ -345,8 +402,12 @@ where difficulty, strategy, self.vm_metrics.clone(), + Some(tips_hash.unwrap_or_default()), + blue_blocks, )?; + let excluded_txns = opened_block.push_txns(txns)?; + let template = opened_block.finalize()?; for invalid_txn in excluded_txns.discarded_txns { self.tx_provider.remove_invalid_txn(invalid_txn.id()); diff --git a/miner/src/create_block_template/test_create_block_template.rs b/miner/src/create_block_template/test_create_block_template.rs index ebcb912977..6228e606d5 100644 --- a/miner/src/create_block_template/test_create_block_template.rs +++ b/miner/src/create_block_template/test_create_block_template.rs @@ -18,6 +18,7 @@ use starcoin_service_registry::{RegistryAsyncService, RegistryService}; use starcoin_storage::BlockStore; use starcoin_time_service::MockTimeService; use starcoin_txpool::TxPoolService; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; use std::sync::Arc; #[stest::test] @@ -36,8 +37,11 @@ fn test_create_block_template_by_net(net: ChainNetworkID) { opt.base_data_dir = Some(temp_path.path().to_path_buf()); let node_config = Arc::new(NodeConfig::load_with_opt(&opt).unwrap()); - let (storage, chain_info, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, chain_info, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let miner_account = AccountInfo::random(); let inner = Inner::new( @@ -49,6 +53,7 @@ fn test_create_block_template_by_net(net: ChainNetworkID) { miner_account, None, None, + dag, ) .unwrap(); @@ -61,8 +66,11 @@ fn test_create_block_template_by_net(net: ChainNetworkID) { #[stest::test(timeout = 120)] fn test_switch_main() { let node_config = Arc::new(NodeConfig::random_for_test()); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let times = 10; @@ -79,7 +87,14 @@ fn test_switch_main() { let net = node_config.net(); for i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let mut tmp_inner = Inner::new( net, @@ -90,6 +105,7 @@ fn test_switch_main() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); @@ -116,8 +132,14 @@ fn test_switch_main() { } for i in 0..3 { - let mut new_main = - BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut new_main = BlockChain::new( + 
net.time_service(), + head_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let block_template = if i == 0 { let tmp = Inner::new( @@ -129,6 +151,7 @@ fn test_switch_main() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); @@ -178,8 +201,11 @@ fn test_switch_main() { #[stest::test] fn test_do_uncles() { let node_config = Arc::new(NodeConfig::random_for_test()); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let times = 2; @@ -196,7 +222,14 @@ fn test_do_uncles() { let net = node_config.net(); for _i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let mut tmp_inner = Inner::new( net, @@ -207,6 +240,7 @@ fn test_do_uncles() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); @@ -224,8 +258,14 @@ fn test_do_uncles() { // branch for _i in 0..times { - let mut branch = - BlockChain::new(net.time_service(), genesis_id, storage.clone(), None).unwrap(); + let mut branch = BlockChain::new( + net.time_service(), + genesis_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let inner = Inner::new( net, storage.clone(), @@ -235,6 +275,7 @@ fn test_do_uncles() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); @@ -254,7 +295,14 @@ fn test_do_uncles() { // uncles for i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let block_template = main_inner .as_ref() @@ -284,8 +332,11 @@ fn test_do_uncles() { #[stest::test(timeout = 120)] fn test_new_head() { let node_config = Arc::new(NodeConfig::random_for_test()); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let times = 10; @@ -306,6 +357,7 @@ fn test_new_head() { miner_account, None, None, + dag, ) .unwrap(); @@ -327,8 +379,11 @@ fn test_new_head() { #[stest::test(timeout = 120)] fn test_new_branch() { let node_config = Arc::new(NodeConfig::random_for_test()); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let times = 5; @@ -351,6 +406,7 @@ fn test_new_branch() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); for _i in 0..times { @@ -367,8 +423,14 @@ fn test_new_branch() { let mut new_head_id = genesis_id; let net = node_config.net(); for i in 0..(times * 2) { - let mut branch = - BlockChain::new(net.time_service(), new_head_id, storage.clone(), None).unwrap(); + let mut branch = BlockChain::new( + 
net.time_service(), + new_head_id, + storage.clone(), + None, + dag.clone(), + ) + .unwrap(); let inner = Inner::new( net, storage.clone(), @@ -378,6 +440,7 @@ fn test_new_branch() { miner_account.clone(), None, None, + dag.clone(), ) .unwrap(); let block_template = inner.create_block_template().unwrap().template; @@ -401,8 +464,11 @@ async fn test_create_block_template_actor() { let registry = RegistryService::launch(); registry.put_shared(node_config.clone()).await.unwrap(); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let genesis_id = genesis.block().id(); let chain_header = storage .get_block_header_by_hash(genesis_id) @@ -412,7 +478,7 @@ async fn test_create_block_template_actor() { //TODO mock txpool. let txpool = TxPoolService::new(node_config.clone(), storage.clone(), chain_header, None); registry.put_shared(txpool).await.unwrap(); - + registry.put_shared(dag).await.unwrap(); registry.put_shared(storage).await.unwrap(); registry .register_mocker(AccountService::mock().unwrap()) @@ -432,7 +498,10 @@ async fn test_create_block_template_actor() { fn test_create_block_template_by_adjust_time() -> Result<()> { let node_config = Arc::new(NodeConfig::random_for_test()); - let (storage, _, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net())?; + let (storage, _, genesis, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + )?; let mut inner = Inner::new( node_config.net(), storage, @@ -442,6 +511,7 @@ fn test_create_block_template_by_adjust_time() -> Result<()> { AccountInfo::random(), None, None, + dag, )?; let template = inner.create_block_template()?.template; let previous_block_time = template.timestamp; diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 54dfd52c12..7e440e7051 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -252,7 +252,7 @@ impl MinerService { if let Some(task) = self.current_task.take() { let block = task.finish(nonce, extra); - let block_hash = block.id(); + let block_hash: HashValue = block.id(); info!(target: "miner", "Mint new block: {}", block); ctx.broadcast(MinedBlock(Arc::new(block))); if let Some(metrics) = self.metrics.as_ref() { diff --git a/miner/tests/miner_test.rs b/miner/tests/miner_test.rs index 76bb1ee549..9d7aae6225 100644 --- a/miner/tests/miner_test.rs +++ b/miner/tests/miner_test.rs @@ -11,6 +11,7 @@ use starcoin_miner::{ use starcoin_service_registry::{RegistryAsyncService, RegistryService}; use starcoin_storage::BlockStore; use starcoin_txpool::TxPoolService; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; use starcoin_types::{system_events::GenerateBlockEvent, U256}; use std::sync::Arc; use std::time::Duration; @@ -23,8 +24,11 @@ async fn test_miner_service() { let registry = RegistryService::launch(); let node_config = Arc::new(config.clone()); registry.put_shared(node_config.clone()).await.unwrap(); - let (storage, _chain_info, genesis) = Genesis::init_storage_for_test(config.net()).unwrap(); + let (storage, _chain_info, genesis, dag) = + Genesis::init_storage_for_test(config.net(), TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH) + .unwrap(); registry.put_shared(storage.clone()).await.unwrap(); + registry.put_shared(dag).await.unwrap(); let genesis_hash = genesis.block().id(); let 
chain_header = storage diff --git a/network-p2p/src/service_test.rs b/network-p2p/src/service_test.rs index 35b8f7a1fc..84694f78d7 100644 --- a/network-p2p/src/service_test.rs +++ b/network-p2p/src/service_test.rs @@ -1,24 +1,24 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::business_layer_handle::{BusinessLayerHandle, HandshakeResult}; -use crate::config::RequestResponseConfig; -use crate::protocol::rep; -use crate::service::NetworkStateInfo; -use crate::{config, Event, NetworkService, NetworkWorker}; -use crate::{NetworkConfiguration, Params, ProtocolId}; +use crate::{ + business_layer_handle::{BusinessLayerHandle, HandshakeResult}, + config, + config::RequestResponseConfig, + protocol::rep, + service::NetworkStateInfo, + Event, NetworkConfiguration, NetworkService, NetworkWorker, Params, ProtocolId, +}; use anyhow::{Ok, Result}; use bcs_ext::BCSCodec; -use futures::prelude::*; -use futures::stream::StreamExt; +use futures::{prelude::*, stream::StreamExt}; use libp2p::PeerId; use network_p2p_types::MultiaddrWithPeerId; use once_cell::sync::Lazy; use sc_peerset::ReputationChange; use serde::{Deserialize, Serialize}; use starcoin_types::startup_info::{ChainInfo, ChainStatus}; -use std::borrow::Cow; -use std::{sync::Arc, time::Duration}; +use std::{borrow::Cow, sync::Arc, time::Duration}; use Event::NotificationStreamOpened; static G_TEST_CHAIN_INFO: Lazy = Lazy::new(Status::default); @@ -580,13 +580,13 @@ fn test_handshake_message() { {"version":1,"min_supported_version":1, "notif_protocols":["/starcoin/txn/1","/starcoin/block/1"], "rpc_protocols":[], - "info":{"chain_id":{"id":1},"genesis_hash":"0x509224b8142926f6c079c66a85ca6db7981734bfe8f9427b3b925574be013f93","status":{"head":{"parent_hash":"0x82b85e25967cd4077f4df26a8975ab34ec6eba954e2c38d2b8393c6c42c2963c","timestamp":1612227819459,"number":9213,"author":"0xe6f6e9ec5a878e29350b4356e21d63db","author_auth_key":null,"txn_accumulator_root":"0xa57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f56","block_accumulator_root":"0x163305561261490852c28f3c1131e4e8d181bea0e1c8552f1ff9f8fbdd107727","state_root":"0xcead8e63f08b297df0e6c0e80a15f824d1a6f08ecb6f88021d6f3dc6c31544af","gas_used":16384000,"difficulty":"0x1648","body_hash":"0x19990c2875098a829ac4d6db2c78b77e6102d0837920304a14ebb474190a5007","chain_id":{"id":1},"nonce":620209232,"extra":"0x00000000"},"info":{"block_id":"0xcabe94c219acfae4044e8e5c8609a6d98153935e60e18be7f0ca611243714da2","total_difficulty":"0x0356fcbd","txn_accumulator_info":{"accumulator_root":"0xa57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f56","frozen_subtree_roots":["0xed2a8ca4a2972761099903410a9dc0c4607eaec944c41d919c27c57418d2aa59","0x21ee454f8510f89866eae45cd5727bee271595e67740ef5aaf80f9fc9d3b84d3","0x527890d7a348f2bfe9801eaad4d98facd340489a37234f405c15ab4e64a0f2eb","0xd0dacaa8beb77998983313ce06b44385b88c1772992f42a835b2f8477118321b","0x31b0df1da737424b169c3a43c0bc23794cc65d65d352aeff8a50b0593320a0cb","0x17dcc4f902c5e237a2c2a3b47b9263b7e67512c026ff76981e9c88955135cd86","0x0686841f7caeb4cd82eb1d51575971c7b189609a87c63970447c45b103619086","0xabfa4a9ed920176ad2a789d731f26398768732f813351e43a38d4c1aa22ff259","0x6914b1dd9aac5d4721fdb7bd736b1f107e72253050b4effd4bd9952da32eef84","0x2b0be3dc9f9196c5f8b5b9c430083d682720651154b29d1778971273eb9dfbcf","0x566f2db25b5255647988d164c4e2855b689fe5dcf7b1ba37bfa6a3d86accc503","0xe5b5f78b0b2e08fc3e3cafa9808346704da2f7b7a572dd84ed947e00003266c4"],"num_leaves":126960,"num_nodes":253908},"block_accumulator_inf
o":{"accumulator_root":"0x2be16af3d9084b18d6ca44050ff46474d888b8c6340db0fbcb7aef9e423794af","frozen_subtree_roots":["0xef637a9b977e8969503e4fedb8558b0f294268bbaa6a0b24a824ad3c98edcf1e","0xa8cf073cfe1b08a5ed94a04dc79f16d125b7d4fb4d7ce02f75f412ded9cf9b79","0xf89ff07faba4299566955c4b9c31fcba99fc5855a229bed7d6487dafd59f1e70","0x2fd161c1b5d03833eb3efb09e530e689ac67ec7d5748246df4891bb9c3f3111b","0x55e40a53390e839a588904e16fe656676b0c5a7b3ec70bd8dcc2276e70e7600b","0xb3918be1fd6460dd30daf058e0e516c7046d242642130547f510335a319a98dd","0xf0737bc518a99c1a619bd87ba82d95dcd8dd19b0836a7dbed514b603f90e7ea8","0xf48e3dfc240d86a64e9adb9c2d276c6f42119e4aaee7598b13f61e4d77390d11","0x62cb92b81afa80226494d92a2120bdd4e9956c48f44f41b1283a59d9fe32e6df","0xeb5618d7d5699735477bee792b0e1a1ffa3c892fa31b7515b6948d80e3b424b2"],"num_leaves":9214,"num_nodes":18418}}}}} + "info":{"chain_id":{"id":1},"genesis_hash":"0x509224b8142926f6c079c66a85ca6db7981734bfe8f9427b3b925574be013f93","status":{"head":{"parent_hash":"0x82b85e25967cd4077f4df26a8975ab34ec6eba954e2c38d2b8393c6c42c2963c","timestamp":1612227819459,"number":9213,"author":"0xe6f6e9ec5a878e29350b4356e21d63db","author_auth_key":null,"txn_accumulator_root":"0xa57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f56","block_accumulator_root":"0x163305561261490852c28f3c1131e4e8d181bea0e1c8552f1ff9f8fbdd107727","state_root":"0xcead8e63f08b297df0e6c0e80a15f824d1a6f08ecb6f88021d6f3dc6c31544af","gas_used":16384000,"difficulty":"0x1648","body_hash":"0x19990c2875098a829ac4d6db2c78b77e6102d0837920304a14ebb474190a5007","chain_id":{"id":1},"nonce":620209232,"extra":"0x00000000","parents_hash":null},"info":{"block_id":"0xcabe94c219acfae4044e8e5c8609a6d98153935e60e18be7f0ca611243714da2","total_difficulty":"0x0356fcbd","txn_accumulator_info":{"accumulator_root":"0xa57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f56","frozen_subtree_roots":["0xed2a8ca4a2972761099903410a9dc0c4607eaec944c41d919c27c57418d2aa59","0x21ee454f8510f89866eae45cd5727bee271595e67740ef5aaf80f9fc9d3b84d3","0x527890d7a348f2bfe9801eaad4d98facd340489a37234f405c15ab4e64a0f2eb","0xd0dacaa8beb77998983313ce06b44385b88c1772992f42a835b2f8477118321b","0x31b0df1da737424b169c3a43c0bc23794cc65d65d352aeff8a50b0593320a0cb","0x17dcc4f902c5e237a2c2a3b47b9263b7e67512c026ff76981e9c88955135cd86","0x0686841f7caeb4cd82eb1d51575971c7b189609a87c63970447c45b103619086","0xabfa4a9ed920176ad2a789d731f26398768732f813351e43a38d4c1aa22ff259","0x6914b1dd9aac5d4721fdb7bd736b1f107e72253050b4effd4bd9952da32eef84","0x2b0be3dc9f9196c5f8b5b9c430083d682720651154b29d1778971273eb9dfbcf","0x566f2db25b5255647988d164c4e2855b689fe5dcf7b1ba37bfa6a3d86accc503","0xe5b5f78b0b2e08fc3e3cafa9808346704da2f7b7a572dd84ed947e00003266c4"],"num_leaves":126960,"num_nodes":253908},"block_accumulator_info":{"accumulator_root":"0x2be16af3d9084b18d6ca44050ff46474d888b8c6340db0fbcb7aef9e423794af","frozen_subtree_roots":["0xef637a9b977e8969503e4fedb8558b0f294268bbaa6a0b24a824ad3c98edcf1e","0xa8cf073cfe1b08a5ed94a04dc79f16d125b7d4fb4d7ce02f75f412ded9cf9b79","0xf89ff07faba4299566955c4b9c31fcba99fc5855a229bed7d6487dafd59f1e70","0x2fd161c1b5d03833eb3efb09e530e689ac67ec7d5748246df4891bb9c3f3111b","0x55e40a53390e839a588904e16fe656676b0c5a7b3ec70bd8dcc2276e70e7600b","0xb3918be1fd6460dd30daf058e0e516c7046d242642130547f510335a319a98dd","0xf0737bc518a99c1a619bd87ba82d95dcd8dd19b0836a7dbed514b603f90e7ea8","0xf48e3dfc240d86a64e9adb9c2d276c6f42119e4aaee7598b13f61e4d77390d11","0x62cb92b81afa80226494d92a2120bdd4e9956c48f44f41b1283a59d9fe32e6df","0xeb5618d7d5699735477bee792
b0e1a1ffa3c892fa31b7515b6948d80e3b424b2"],"num_leaves":9214,"num_nodes":18418}}}}} "#; let status = serde_json::from_str::(json_msg).unwrap(); //let hex = hex::encode(status.encode().unwrap()); //println!("{}", hex); //println!("{}", serde_json::to_string(&status).unwrap()); - let bin_msg = "0100000001000000020f2f73746172636f696e2f74786e2f31112f73746172636f696e2f626c6f636b2f31000120509224b8142926f6c079c66a85ca6db7981734bfe8f9427b3b925574be013f932082b85e25967cd4077f4df26a8975ab34ec6eba954e2c38d2b8393c6c42c2963cc337446077010000fd23000000000000e6f6e9ec5a878e29350b4356e21d63db0020a57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f5620163305561261490852c28f3c1131e4e8d181bea0e1c8552f1ff9f8fbdd10772720cead8e63f08b297df0e6c0e80a15f824d1a6f08ecb6f88021d6f3dc6c31544af0000fa000000000000000000000000000000000000000000000000000000000000000000000016482019990c2875098a829ac4d6db2c78b77e6102d0837920304a14ebb474190a50070150a4f7240000000020cabe94c219acfae4044e8e5c8609a6d98153935e60e18be7f0ca611243714da2000000000000000000000000000000000000000000000000000000000356fcbd20a57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f560c20ed2a8ca4a2972761099903410a9dc0c4607eaec944c41d919c27c57418d2aa592021ee454f8510f89866eae45cd5727bee271595e67740ef5aaf80f9fc9d3b84d320527890d7a348f2bfe9801eaad4d98facd340489a37234f405c15ab4e64a0f2eb20d0dacaa8beb77998983313ce06b44385b88c1772992f42a835b2f8477118321b2031b0df1da737424b169c3a43c0bc23794cc65d65d352aeff8a50b0593320a0cb2017dcc4f902c5e237a2c2a3b47b9263b7e67512c026ff76981e9c88955135cd86200686841f7caeb4cd82eb1d51575971c7b189609a87c63970447c45b10361908620abfa4a9ed920176ad2a789d731f26398768732f813351e43a38d4c1aa22ff259206914b1dd9aac5d4721fdb7bd736b1f107e72253050b4effd4bd9952da32eef84202b0be3dc9f9196c5f8b5b9c430083d682720651154b29d1778971273eb9dfbcf20566f2db25b5255647988d164c4e2855b689fe5dcf7b1ba37bfa6a3d86accc50320e5b5f78b0b2e08fc3e3cafa9808346704da2f7b7a572dd84ed947e00003266c4f0ef010000000000d4df030000000000202be16af3d9084b18d6ca44050ff46474d888b8c6340db0fbcb7aef9e423794af0a20ef637a9b977e8969503e4fedb8558b0f294268bbaa6a0b24a824ad3c98edcf1e20a8cf073cfe1b08a5ed94a04dc79f16d125b7d4fb4d7ce02f75f412ded9cf9b7920f89ff07faba4299566955c4b9c31fcba99fc5855a229bed7d6487dafd59f1e70202fd161c1b5d03833eb3efb09e530e689ac67ec7d5748246df4891bb9c3f3111b2055e40a53390e839a588904e16fe656676b0c5a7b3ec70bd8dcc2276e70e7600b20b3918be1fd6460dd30daf058e0e516c7046d242642130547f510335a319a98dd20f0737bc518a99c1a619bd87ba82d95dcd8dd19b0836a7dbed514b603f90e7ea820f48e3dfc240d86a64e9adb9c2d276c6f42119e4aaee7598b13f61e4d77390d112062cb92b81afa80226494d92a2120bdd4e9956c48f44f41b1283a59d9fe32e6df20eb5618d7d5699735477bee792b0e1a1ffa3c892fa31b7515b6948d80e3b424b2fe23000000000000f247000000000000"; + let bin_msg = 
"0100000001000000020f2f73746172636f696e2f74786e2f31112f73746172636f696e2f626c6f636b2f31000120509224b8142926f6c079c66a85ca6db7981734bfe8f9427b3b925574be013f932082b85e25967cd4077f4df26a8975ab34ec6eba954e2c38d2b8393c6c42c2963cc337446077010000fd23000000000000e6f6e9ec5a878e29350b4356e21d63db0020a57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f5620163305561261490852c28f3c1131e4e8d181bea0e1c8552f1ff9f8fbdd10772720cead8e63f08b297df0e6c0e80a15f824d1a6f08ecb6f88021d6f3dc6c31544af0000fa000000000000000000000000000000000000000000000000000000000000000000000016482019990c2875098a829ac4d6db2c78b77e6102d0837920304a14ebb474190a50070150a4f724000000000020cabe94c219acfae4044e8e5c8609a6d98153935e60e18be7f0ca611243714da2000000000000000000000000000000000000000000000000000000000356fcbd20a57516ba50672afe23869529b2d54b9cb95bf6c2ad0982048c5dc1633e567f560c20ed2a8ca4a2972761099903410a9dc0c4607eaec944c41d919c27c57418d2aa592021ee454f8510f89866eae45cd5727bee271595e67740ef5aaf80f9fc9d3b84d320527890d7a348f2bfe9801eaad4d98facd340489a37234f405c15ab4e64a0f2eb20d0dacaa8beb77998983313ce06b44385b88c1772992f42a835b2f8477118321b2031b0df1da737424b169c3a43c0bc23794cc65d65d352aeff8a50b0593320a0cb2017dcc4f902c5e237a2c2a3b47b9263b7e67512c026ff76981e9c88955135cd86200686841f7caeb4cd82eb1d51575971c7b189609a87c63970447c45b10361908620abfa4a9ed920176ad2a789d731f26398768732f813351e43a38d4c1aa22ff259206914b1dd9aac5d4721fdb7bd736b1f107e72253050b4effd4bd9952da32eef84202b0be3dc9f9196c5f8b5b9c430083d682720651154b29d1778971273eb9dfbcf20566f2db25b5255647988d164c4e2855b689fe5dcf7b1ba37bfa6a3d86accc50320e5b5f78b0b2e08fc3e3cafa9808346704da2f7b7a572dd84ed947e00003266c4f0ef010000000000d4df030000000000202be16af3d9084b18d6ca44050ff46474d888b8c6340db0fbcb7aef9e423794af0a20ef637a9b977e8969503e4fedb8558b0f294268bbaa6a0b24a824ad3c98edcf1e20a8cf073cfe1b08a5ed94a04dc79f16d125b7d4fb4d7ce02f75f412ded9cf9b7920f89ff07faba4299566955c4b9c31fcba99fc5855a229bed7d6487dafd59f1e70202fd161c1b5d03833eb3efb09e530e689ac67ec7d5748246df4891bb9c3f3111b2055e40a53390e839a588904e16fe656676b0c5a7b3ec70bd8dcc2276e70e7600b20b3918be1fd6460dd30daf058e0e516c7046d242642130547f510335a319a98dd20f0737bc518a99c1a619bd87ba82d95dcd8dd19b0836a7dbed514b603f90e7ea820f48e3dfc240d86a64e9adb9c2d276c6f42119e4aaee7598b13f61e4d77390d112062cb92b81afa80226494d92a2120bdd4e9956c48f44f41b1283a59d9fe32e6df20eb5618d7d5699735477bee792b0e1a1ffa3c892fa31b7515b6948d80e3b424b2fe23000000000000f247000000000000"; let bytes = hex::decode(bin_msg).unwrap(); let status2 = Status::decode(bytes.as_slice()).unwrap(); assert_eq!(status, status2); diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs index dd4b3a909c..f98bc15cf6 100644 --- a/network-rpc/api/src/lib.rs +++ b/network-rpc/api/src/lib.rs @@ -17,7 +17,7 @@ use starcoin_state_tree::StateNode; use starcoin_types::access_path::AccessPath; use starcoin_types::account_address::AccountAddress; use starcoin_types::account_state::AccountState; -use starcoin_types::block::{Block, BlockHeader, BlockInfo, BlockNumber}; +use starcoin_types::block::{BlockHeader, BlockInfo, BlockNumber}; use starcoin_types::transaction::{SignedUserTransaction, Transaction, TransactionInfo}; use starcoin_vm_types::state_store::table::TableInfo; @@ -280,7 +280,13 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static { &self, peer_id: PeerId, ids: Vec, - ) -> BoxFuture>>>; + ) -> BoxFuture>>>; + + fn get_blocks_v1( + &self, + peer_id: PeerId, + ids: Vec, + ) -> BoxFuture>>>; fn get_state_with_table_item_proof( &self, @@ -293,6 +299,12 @@ pub trait NetworkRpc: Sized + Send 
+ Sync + 'static { peer_id: PeerId, request: GetTableInfo, ) -> BoxFuture<Result<Option<TableInfo>>>; + + fn get_dag_block_children( + &self, + peer_id: PeerId, + request: Vec<HashValue>, + ) -> BoxFuture<Result<Vec<HashValue>>>; } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/network-rpc/src/lib.rs b/network-rpc/src/lib.rs index a66f89c7da..526c3dae99 100644 --- a/network-rpc/src/lib.rs +++ b/network-rpc/src/lib.rs @@ -6,7 +6,9 @@ use anyhow::Result; use api_limiter::{ApiLimiters, Quota}; use network_api::{PeerId, RpcInfo}; use network_p2p_core::server::NetworkRpcServer; -use network_p2p_core::{NetRpcError, RawRpcServer, RpcErrorCode}; +use network_p2p_core::NetRpcError; +use network_p2p_core::RawRpcServer; +use network_p2p_core::RpcErrorCode; use network_p2p_types::{OutgoingResponse, ProtocolRequest}; use starcoin_chain_service::ChainReaderService; use starcoin_config::ApiQuotaConfig; @@ -56,7 +58,6 @@ impl NetworkRpcService { ) -> Self { let rpc_impl = NetworkRpcImpl::new(storage, chain_service, txpool_service, state_service); let rpc_server = NetworkRpcServer::new(rpc_impl.to_delegate()); - let limiters = ApiLimiters::new( Into::<Quota>::into(quotas.default_global_api_quota()).0, quotas diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs index c333341a44..eb6590a91e 100644 --- a/network-rpc/src/rpc.rs +++ b/network-rpc/src/rpc.rs @@ -22,7 +22,6 @@ use starcoin_state_tree::StateNode; use starcoin_storage::Store; use starcoin_txpool::TxPoolService; use starcoin_txpool_api::TxPoolSyncService; -use starcoin_types::block::Block; use starcoin_types::{ account_state::AccountState, block::{BlockHeader, BlockInfo, BlockNumber}, @@ -303,7 +302,31 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { &self, _peer_id: PeerId, ids: Vec<HashValue>, - ) -> BoxFuture<Result<Vec<Option<Block>>>> { + ) -> BoxFuture<Result<Vec<Option<LegacyBlock>>>> { + let chain_service = self.chain_service.clone(); + let fut = async move { + if ids.len() as u64 > MAX_BLOCK_REQUEST_SIZE { + return Err(NetRpcError::client_err(format!( + "max block ids size > {}", + MAX_BLOCK_REQUEST_SIZE + )) + .into()); + } + chain_service.get_blocks(ids).await.map(|blocks| { + blocks + .into_iter() + .map(|opt_block| opt_block.map(|block| block.into())) + .collect() + }) + }; + Box::pin(fut) + } + + fn get_blocks_v1( + &self, + _peer_id: PeerId, + ids: Vec<HashValue>, + ) -> BoxFuture<Result<Vec<Option<Block>>>> { let chain_service = self.chain_service.clone(); let fut = async move { if ids.len() as u64 > MAX_BLOCK_REQUEST_SIZE { @@ -317,4 +340,14 @@ impl gen_server::NetworkRpc for NetworkRpcImpl { } }; Box::pin(fut) } + + fn get_dag_block_children( + &self, + _peer_id: PeerId, + request: Vec<HashValue>, + ) -> BoxFuture<Result<Vec<HashValue>>> { + let chain_service = self.chain_service.clone(); + let fut = async move { chain_service.get_dag_block_children(request).await }; + Box::pin(fut) + } } diff --git a/network-rpc/src/tests.rs b/network-rpc/src/tests.rs index 4516051bd8..c6da49ca95 100644 --- a/network-rpc/src/tests.rs +++ b/network-rpc/src/tests.rs @@ -18,14 +18,16 @@ use std::sync::Arc; #[stest::test] fn test_network_rpc() { + // network1 initialization let (handle1, net_addr_1) = { let config_1 = NodeConfig::random_for_test(); let net_addr = config_1.network.self_address(); debug!("First node address: {:?}", net_addr); (gen_chain_env(config_1).unwrap(), net_addr) }; - let network_1 = handle1.network(); + + // network2 initialization let (handle2, peer_id_2) = { let mut config_2 = NodeConfig::random_for_test(); config_2.network.seeds = vec![net_addr_1].into(); diff --git a/network/api/src/messages.rs b/network/api/src/messages.rs index 046fb58e77..8f3cded0ba 100644 --- a/network/api/src/messages.rs +++
b/network/api/src/messages.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue; use starcoin_service_registry::ServiceRequest; use starcoin_types::block::BlockInfo; -use starcoin_types::compact_block::CompactBlock; +use starcoin_types::compact_block::{CompactBlock, LegacyCompactBlock}; use starcoin_types::startup_info::ChainInfo; use starcoin_types::transaction::SignedUserTransaction; use std::borrow::Cow; @@ -50,6 +50,32 @@ pub struct CompactBlockMessage { pub block_info: BlockInfo, } +/// The legacy message of block notifications exchanged over the network +#[derive(Serialize, Deserialize)] +#[serde(rename = "CompactBlockMessage")] +pub struct LegacyCompactBlockMessage { + pub compact_block: LegacyCompactBlock, + pub block_info: BlockInfo, +} + +impl From<LegacyCompactBlockMessage> for CompactBlockMessage { + fn from(value: LegacyCompactBlockMessage) -> Self { + Self { + compact_block: value.compact_block.into(), + block_info: value.block_info, + } + } +} + +impl From<CompactBlockMessage> for LegacyCompactBlockMessage { + fn from(value: CompactBlockMessage) -> Self { + Self { + compact_block: value.compact_block.into(), + block_info: value.block_info, + } + } +} + impl CompactBlockMessage { pub fn new(compact_block: CompactBlock, block_info: BlockInfo) -> Self { Self { @@ -57,6 +83,10 @@ impl CompactBlockMessage { block_info, } } + + pub fn is_legacy(&self) -> bool { + self.compact_block.header.is_legacy() + } } impl Sample for CompactBlockMessage { @@ -131,9 +161,10 @@ impl NotificationMessage { TXN_PROTOCOL_NAME => { NotificationMessage::Transactions(TransactionsMessage::decode(bytes)?) } - BLOCK_PROTOCOL_NAME => { - NotificationMessage::CompactBlock(Box::new(CompactBlockMessage::decode(bytes)?)) - } + BLOCK_PROTOCOL_NAME => NotificationMessage::CompactBlock(Box::new( + CompactBlockMessage::decode(bytes) + .or_else(|_| LegacyCompactBlockMessage::decode(bytes).map(Into::into))?, + )), ANNOUNCEMENT_PROTOCOL_NAME => { NotificationMessage::Announcement(Announcement::decode(bytes)?) } @@ -148,7 +179,15 @@ impl NotificationMessage { pub fn encode_notification(&self) -> Result<(Cow<'static, str>, Vec<u8>)> { Ok(match self { NotificationMessage::Transactions(msg) => (TXN_PROTOCOL_NAME.into(), msg.encode()?), - NotificationMessage::CompactBlock(msg) => (BLOCK_PROTOCOL_NAME.into(), msg.encode()?), + NotificationMessage::CompactBlock(msg) => ( + BLOCK_PROTOCOL_NAME.into(), + if msg.is_legacy() { + let legacy = Into::<LegacyCompactBlockMessage>::into(*msg.clone()); + legacy.encode() + } else { + msg.encode() + }?, + ), NotificationMessage::Announcement(msg) => { (ANNOUNCEMENT_PROTOCOL_NAME.into(), msg.encode()?)
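The decode-with-fallback and legacy re-encode above is the wire-compatibility strategy for the block notification protocol: incoming bytes are tried against the current layout first and then against the pre-DAG layout, while outgoing messages that still fit the old layout are encoded that way so pre-upgrade peers keep decoding them. A minimal, self-contained sketch of the same pattern, assuming simplified stand-in types and the plain bcs crate in place of the project's BCSCodec helpers:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct MessageV2 {
        header: u64,
        parents: Vec<u64>, // new field, absent from the old wire layout
    }

    #[derive(Serialize, Deserialize)]
    struct LegacyMessage {
        header: u64,
    }

    impl From<LegacyMessage> for MessageV2 {
        fn from(v: LegacyMessage) -> Self {
            Self { header: v.header, parents: vec![] }
        }
    }

    // Try the current layout first, then fall back to the legacy one
    // (BCS rejects trailing or missing bytes, so the wrong layout fails).
    fn decode(bytes: &[u8]) -> anyhow::Result<MessageV2> {
        bcs::from_bytes::<MessageV2>(bytes)
            .or_else(|_| bcs::from_bytes::<LegacyMessage>(bytes).map(Into::into))
            .map_err(Into::into)
    }

    // Encode in the legacy layout whenever the payload still fits it.
    fn encode(msg: &MessageV2) -> anyhow::Result<Vec<u8>> {
        if msg.parents.is_empty() {
            Ok(bcs::to_bytes(&LegacyMessage { header: msg.header })?)
        } else {
            Ok(bcs::to_bytes(msg)?)
        }
    }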
} diff --git a/network/api/src/peer_provider.rs b/network/api/src/peer_provider.rs index 0987895bbf..e6df5e6201 100644 --- a/network/api/src/peer_provider.rs +++ b/network/api/src/peer_provider.rs @@ -14,6 +14,7 @@ use rand::prelude::SliceRandom; use rand::Rng; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use starcoin_logger::prelude::info; use starcoin_types::block::BlockHeader; use starcoin_types::U256; use std::borrow::Cow; @@ -280,8 +281,10 @@ impl PeerSelector { peers }); if best_peers.is_empty() || best_peers[0].total_difficulty() <= min_difficulty { + info!("best peer difficulty {:?} is not greater than min difficulty {:?}, return None", best_peers.first().map(|p| p.total_difficulty()), min_difficulty); None } else { + info!("best peer difficulty {:?}, info: {:?} picked", best_peers[0].total_difficulty(), best_peers); Some(best_peers) } } @@ -300,8 +303,10 @@ impl PeerSelector { .map(|peer| peer.peer_info().clone()) .collect(); if betters.is_empty() { + info!("no better peers found for sync"); None } else { + info!("better peers found: {:?}", betters); Some(betters) } } diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs index 0c58124c82..40df900496 100644 --- a/network/src/network_p2p_handle.rs +++ b/network/src/network_p2p_handle.rs @@ -10,12 +10,38 @@ use network_p2p::business_layer_handle::HandshakeResult; use network_p2p::{business_layer_handle::BusinessLayerHandle, protocol::rep, PeerId}; use sc_peerset::ReputationChange; use serde::{Deserialize, Serialize}; -use starcoin_types::startup_info::{ChainInfo, ChainStatus}; +use starcoin_types::startup_info::{ChainInfo, ChainStatus, OldChainInfo}; /// Current protocol version. -pub(crate) const CURRENT_VERSION: u32 = 5; +pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; +#[derive(Deserialize, Serialize)] +#[serde(rename = "Status")] +pub struct LegacyStatus { + /// Protocol version. + pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Tell other peer which notification protocols we support. + pub notif_protocols: Vec<Cow<'static, str>>, + /// Tell other peer which rpc api we support. + pub rpc_protocols: Vec<Cow<'static, str>>, + /// The generic data related to the peer. + pub info: OldChainInfo, +} + +impl From<LegacyStatus> for Status { + fn from(value: LegacyStatus) -> Self { + Self { + version: value.version, + min_supported_version: value.min_supported_version, + notif_protocols: value.notif_protocols, + rpc_protocols: value.rpc_protocols, + info: value.info.into(), + } + } +} /// Status sent on connection.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] @@ -95,11 +121,14 @@ impl BusinessLayerHandle for Networkp2pHandle { received_handshake: Vec<u8>, ) -> Result<HandshakeResult, ReputationChange> { match Status::decode(&received_handshake[..]) { - std::result::Result::Ok(status) => self.inner_handshake(peer_id, status), - Err(err) => { - error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}: {:?}: {}", peer_id, hex::encode(received_handshake), err); - Err(rep::BAD_MESSAGE) - } + Result::Ok(status) => self.inner_handshake(peer_id, status), + Err(err) => match LegacyStatus::decode(&received_handshake[..]) { + Result::Ok(s5) => self.inner_handshake(peer_id, s5.into()), + Err(err_inner) => { + error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}: {:?}: {}, {}", peer_id, hex::encode(received_handshake), err_inner, err); + Err(rep::BAD_MESSAGE) + } + }, } } diff --git a/network/tests/network_node_test.rs b/network/tests/network_node_test.rs index e17b9e94ae..c70ef5af26 100644 --- a/network/tests/network_node_test.rs +++ b/network/tests/network_node_test.rs @@ -35,7 +35,7 @@ fn test_reconnected_peers() -> anyhow::Result<()> { // stop node2, node1's peer set is empty node2.stop()?; - thread::sleep(Duration::from_secs(3)); + thread::sleep(Duration::from_secs(12)); loop { let network_state = block_on(async { node1_network.network_state().await })?; debug!("network_state: {:?}", network_state); diff --git a/node/Cargo.toml b/node/Cargo.toml index b224e087c0..f76ab1986a 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -48,6 +48,8 @@ thiserror = { workspace = true } timeout-join-handler = { workspace = true } tokio = { features = ["full"], workspace = true } num_cpus = { workspace = true } +starcoin-dag = { workspace = true } +starcoin-chain-api = { workspace = true } [dev-dependencies] stest = { workspace = true } diff --git a/node/src/lib.rs b/node/src/lib.rs index 3c52be3b13..d5d369a7d7 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -8,6 +8,7 @@ use futures::executor::block_on; use futures_timer::Delay; use starcoin_chain_service::{ChainAsyncService, ChainReaderService}; use starcoin_config::{BaseConfig, NodeConfig, StarcoinOpt}; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_network::NetworkServiceRef; @@ -17,10 +18,11 @@ use starcoin_node_api::node_service::NodeAsyncService; use starcoin_rpc_server::service::RpcService; use starcoin_service_registry::bus::{Bus, BusService}; use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceInfo, ServiceRef}; -use starcoin_storage::Storage; -use starcoin_sync::sync::SyncService; +use starcoin_storage::{BlockStore, Storage}; +use starcoin_sync::sync::{CheckSyncEvent, SyncService}; use starcoin_txpool::TxPoolService; use starcoin_types::block::Block; +use starcoin_types::block::BlockNumber; use starcoin_types::system_events::{GenerateBlockEvent, NewHeadBlock}; use std::sync::Arc; use std::time::Duration; @@ -175,8 +177,14 @@ impl NodeHandle { .expect("TxPoolService must exist.") } + pub fn get_dag(&self) -> Result<BlockDAG> { + self.registry + .get_shared_sync::<BlockDAG>() + .map_err(|e| format_err!("Get BlockDAG error: {:?}", e)) + } + /// Just for test - pub fn generate_block(&self) -> Result<Block> { + pub fn generate_block(&self) -> Result<(Block, bool)> { let registry = &self.registry; block_on(async move { let bus = registry.service_ref::<BusService>().await?; @@ -186,11 +194,11 @@ impl NodeHandle { let receiver = bus.oneshot::<NewHeadBlock>().await?;
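The receiver registered above and the broadcast that follows form a subscribe-trigger-wait pattern: subscribe to NewHeadBlock before firing GenerateBlockEvent so the notification cannot be missed, then bound the wait (raised from 5s to 20s in this diff) so a stalled miner fails the test instead of hanging it. A minimal sketch of the shape under simplified assumptions, with a plain oneshot channel standing in for the bus subscription:

    use std::time::Duration;
    use futures::channel::oneshot;

    async fn trigger_and_wait() -> anyhow::Result<String> {
        // 1. Subscribe first so the event cannot be missed ...
        let (tx, rx) = oneshot::channel::<String>();

        // 2. ... then fire the trigger (the real code broadcasts
        //    GenerateBlockEvent::new_break(true) on the bus).
        tx.send("new-head-block".to_string()).ok();

        // 3. Await with an upper bound instead of forever.
        match async_std::future::timeout(Duration::from_secs(20), rx).await {
            Ok(Ok(event)) => Ok(event),
            Ok(Err(_canceled)) => anyhow::bail!("event sender dropped"),
            Err(_elapsed) => anyhow::bail!("wait timeout for event"),
        }
    }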
bus.broadcast(GenerateBlockEvent::new_break(true))?; let block = if let Ok(Ok(event)) = - async_std::future::timeout(Duration::from_secs(5), receiver).await + async_std::future::timeout(Duration::from_secs(20), receiver).await { //wait for the new block event to be processed. Delay::new(Duration::from_millis(100)).await; - event.0.block().clone() + event.executed_block.block().clone() } else { let latest_head = chain_service.main_head_block().await?; debug!( @@ -204,9 +212,18 @@ impl NodeHandle { bail!("Wait timeout for generate_block") } }; - Ok(block) + + let is_dag_block = chain_service.dag_fork_number().await? < block.header().number(); + Ok((block, is_dag_block)) }) } + + pub async fn start_to_sync(&self) -> Result<()> { + let registry = &self.registry; + let sync_service = registry.service_ref::<SyncService>().await?; + sync_service.notify(CheckSyncEvent::default()).expect("failed to start sync"); + Ok(()) + } } pub fn run_node_by_opt( diff --git a/node/src/node.rs b/node/src/node.rs index fd3e7fcf77..cef0b05ad4 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -16,7 +16,9 @@ use starcoin_account_service::{AccountEventService, AccountService, AccountStora use starcoin_block_relayer::BlockRelayer; use starcoin_chain_notify::ChainNotifyHandlerService; use starcoin_chain_service::ChainReaderService; +use starcoin_config::genesis_config::G_BASE_MAX_UNCLES_PER_BLOCK; use starcoin_config::NodeConfig; +use starcoin_dag::block_dag_config::{BlockDAGConfigMock, BlockDAGType}; use starcoin_genesis::{Genesis, GenesisError}; use starcoin_logger::prelude::*; use starcoin_logger::structured_log::init_slog_logger; @@ -51,7 +53,9 @@ use starcoin_sync::block_connector::{BlockConnectorService, ExecuteRequest, Rese use starcoin_sync::sync::SyncService; use starcoin_sync::txn_sync::TxnSyncService; use starcoin_sync::verified_rpc_client::VerifiedRpcClient; -use starcoin_txpool::TxPoolActorService; +use starcoin_txpool::{TxPoolActorService, TxPoolService}; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG; +use starcoin_types::blockhash::KType; use starcoin_types::system_events::{SystemShutdown, SystemStarted}; use starcoin_vm_runtime::metrics::VMMetrics; use std::sync::Arc; @@ -133,7 +137,9 @@ impl ServiceHandler for NodeService { .start_service_sync(GenerateBlockEventPacemaker::service_name()), ), NodeRequest::ResetNode(block_hash) => { - let connect_service = ctx.service_ref::<BlockConnectorService>()?.clone(); + let connect_service = ctx + .service_ref::<BlockConnectorService<TxPoolService>>()? + .clone(); let fut = async move { info!("Prepare to reset node startup info to {}", block_hash); connect_service.send(ResetRequest { block_hash }).await? @@ -147,7 +153,9 @@ impl ServiceHandler for NodeService { .get_shared_sync::<Arc<Storage>>() .expect("Storage must exist."); - let connect_service = ctx.service_ref::<BlockConnectorService>()?.clone(); + let connect_service = ctx + .service_ref::<BlockConnectorService<TxPoolService>>()?
+ .clone(); let network = ctx.get_shared::()?; let fut = async move { info!("Prepare to re execute block {}", block_hash); @@ -311,9 +319,25 @@ impl NodeService { let upgrade_time = SystemTime::now().duration_since(start_time)?; let storage = Arc::new(Storage::new(storage_instance)?); registry.put_shared(storage.clone()).await?; + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + config.storage.dag_dir(), + config.storage.clone().into(), + )?; + let dag = match config.base().net().id() { + starcoin_config::ChainNetworkID::Builtin(starcoin_config::BuiltinNetworkID::Test) => { + starcoin_dag::blockdag::BlockDAG::new_with_type( + 8, + dag_storage.clone(), + BlockDAGType::BlockDAGTestMock(BlockDAGConfigMock { + fork_number: TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG, + }), + ) + }, + _ => starcoin_dag::blockdag::BlockDAG::new(8, dag_storage.clone()), + }; + registry.put_shared(dag.clone()).await?; let (chain_info, genesis) = - Genesis::init_and_check_storage(config.net(), storage.clone(), config.data_dir())?; - + Genesis::init_and_check_storage(config.net(), storage.clone(), dag, config.data_dir())?; info!( "Start node with chain info: {}, number {} upgrade_time cost {} secs, ", chain_info, @@ -347,7 +371,9 @@ impl NodeService { registry.register::().await?; - registry.register::().await?; + registry + .register::>() + .await?; registry.register::().await?; let block_relayer = registry.register::().await?; diff --git a/rpc/api/Cargo.toml b/rpc/api/Cargo.toml index 650459e6ae..f9cff48e31 100644 --- a/rpc/api/Cargo.toml +++ b/rpc/api/Cargo.toml @@ -47,6 +47,8 @@ starcoin-vm-types = { workspace = true } thiserror = { workspace = true } vm-status-translator = { workspace = true } move-core-types = { workspace = true } +starcoin-flexidag = { workspace = true } +starcoin-dag = { workspace = true } [package] authors = { workspace = true } diff --git a/rpc/api/generated_rpc_schema/chain.json b/rpc/api/generated_rpc_schema/chain.json index b1fffe46bb..8bda677a51 100644 --- a/rpc/api/generated_rpc_schema/chain.json +++ b/rpc/api/generated_rpc_schema/chain.json @@ -220,6 +220,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -652,6 +663,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -769,6 +791,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -1203,6 +1236,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -1320,6 +1364,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + 
"description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -1762,6 +1817,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -1879,6 +1945,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -2102,6 +2179,16 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "timestamp": { "type": "string" }, @@ -3276,6 +3363,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", diff --git a/rpc/api/generated_rpc_schema/node.json b/rpc/api/generated_rpc_schema/node.json index 84844f78cf..36c697dc04 100644 --- a/rpc/api/generated_rpc_schema/node.json +++ b/rpc/api/generated_rpc_schema/node.json @@ -349,6 +349,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", @@ -591,6 +602,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "block parents", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", diff --git a/rpc/api/generated_rpc_schema/sync_manager.json b/rpc/api/generated_rpc_schema/sync_manager.json index 746a012e69..0288d0b53f 100644 --- a/rpc/api/generated_rpc_schema/sync_manager.json +++ b/rpc/api/generated_rpc_schema/sync_manager.json @@ -113,6 +113,17 @@ "type": "string", "format": "HashValue" }, + "parents_hash": { + "description": "Parents hash.", + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "HashValue" + } + }, "state_root": { "description": "The last transaction state_root of this block after execute.", "type": "string", diff --git a/rpc/api/src/chain/mod.rs b/rpc/api/src/chain/mod.rs index 6901020caf..088e855243 100644 --- a/rpc/api/src/chain/mod.rs +++ b/rpc/api/src/chain/mod.rs @@ -13,6 +13,7 @@ use openrpc_derive::openrpc; use schemars::{self, JsonSchema}; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_types::block::BlockNumber; use starcoin_vm_types::access_path::AccessPath; @@ -122,6 +123,10 @@ pub trait ChainApi { event_index: Option, access_path: Option>, ) -> FutureResult>>>; + + /// Get the state of a dag. 
+ #[rpc(name = "chain.get_dag_state")] + fn get_dag_state(&self) -> FutureResult; } #[derive(Copy, Clone, Default, Serialize, Deserialize, JsonSchema)] diff --git a/rpc/api/src/types.rs b/rpc/api/src/types.rs index 532a140998..ccba465351 100644 --- a/rpc/api/src/types.rs +++ b/rpc/api/src/types.rs @@ -24,7 +24,7 @@ use starcoin_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue}; use starcoin_service_registry::ServiceRequest; use starcoin_state_api::{StateProof, StateWithProof, StateWithTableItemProof}; use starcoin_types::block::{ - Block, BlockBody, BlockHeader, BlockHeaderExtra, BlockInfo, BlockNumber, + Block, BlockBody, BlockHeader, BlockHeaderExtra, BlockInfo, BlockNumber, ParentsHash, }; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; use starcoin_types::event::EventKey; @@ -433,6 +433,8 @@ pub struct BlockHeaderView { pub nonce: u32, /// block header extra pub extra: BlockHeaderExtra, + /// block parents + pub parents_hash: ParentsHash, } impl From for BlockHeaderView { @@ -453,6 +455,7 @@ impl From for BlockHeaderView { chain_id: origin.chain_id().id(), nonce: origin.nonce(), extra: *origin.extra(), + parents_hash: origin.parents_hash(), } } } @@ -473,6 +476,7 @@ impl From for BlockHeader { genesis_config::ChainId::new(header_view.chain_id), header_view.nonce, header_view.extra, + header_view.parents_hash, ) } } @@ -664,6 +668,7 @@ pub struct BlockMetadataView { pub number: StrView, pub chain_id: u8, pub parent_gas_used: StrView, + pub parents_hash: Option>, } impl From for BlockMetadataView { @@ -677,6 +682,7 @@ impl From for BlockMetadataView { number, chain_id, parent_gas_used, + parents_hash, ) = origin.into_inner(); BlockMetadataView { parent_hash, @@ -687,6 +693,7 @@ impl From for BlockMetadataView { number: number.into(), chain_id: chain_id.id(), parent_gas_used: parent_gas_used.into(), + parents_hash, } } } @@ -703,8 +710,9 @@ impl Into for BlockMetadataView { number, chain_id, parent_gas_used, + parents_hash, } = self; - BlockMetadata::new( + BlockMetadata::new_with_parents( parent_hash, timestamp.0, author, @@ -713,6 +721,7 @@ impl Into for BlockMetadataView { number.0, genesis_config::ChainId::new(chain_id), parent_gas_used.0, + parents_hash.unwrap_or_default(), ) } } diff --git a/rpc/client/Cargo.toml b/rpc/client/Cargo.toml index fe2ea529d2..154579a250 100644 --- a/rpc/client/Cargo.toml +++ b/rpc/client/Cargo.toml @@ -45,6 +45,7 @@ starcoin-types = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +starcoin-dag = { workspace = true } [dev-dependencies] starcoin-config = { workspace = true } diff --git a/rpc/client/src/lib.rs b/rpc/client/src/lib.rs index 7b610fa1bd..1e3dcc2fa5 100644 --- a/rpc/client/src/lib.rs +++ b/rpc/client/src/lib.rs @@ -21,6 +21,7 @@ use serde_json::Value; use starcoin_abi_types::{FunctionABI, ModuleABI, StructInstantiation}; use starcoin_account_api::AccountInfo; use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_logger::{prelude::*, LogPattern}; use starcoin_rpc_api::chain::{ GetBlockOption, GetBlocksOption, GetEventOption, GetTransactionOption, @@ -785,6 +786,11 @@ impl RpcClient { .map_err(map_err) } + pub fn get_dag_state(&self) -> anyhow::Result { + self.call_rpc_blocking(|inner| inner.chain_client.get_dag_state()) + .map_err(map_err) + } + pub fn chain_get_blocks_by_number( &self, number: Option, diff --git a/rpc/server/Cargo.toml b/rpc/server/Cargo.toml index 
c3bd9b4d3d..e048b0acd5 100644 --- a/rpc/server/Cargo.toml +++ b/rpc/server/Cargo.toml @@ -67,6 +67,7 @@ starcoin-vm-types = { workspace = true } thiserror = { workspace = true } vm-status-translator = { workspace = true } starcoin-vm-runtime = { workspace = true } +starcoin-dag = { workspace = true } [dev-dependencies] starcoin-chain-mock = { workspace = true } diff --git a/rpc/server/src/module/chain_rpc.rs b/rpc/server/src/module/chain_rpc.rs index 3544155169..c0177e5dbb 100644 --- a/rpc/server/src/module/chain_rpc.rs +++ b/rpc/server/src/module/chain_rpc.rs @@ -7,6 +7,7 @@ use starcoin_abi_decoder::decode_txn_payload; use starcoin_chain_service::ChainAsyncService; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::DagStateView; use starcoin_logger::prelude::*; use starcoin_resource_viewer::MoveValueAnnotator; use starcoin_rpc_api::chain::{ @@ -469,6 +470,14 @@ where Box::pin(fut.boxed()) } + + #[doc = r" Get the state of a dag."] + fn get_dag_state(&self) -> FutureResult { + let service = self.service.clone(); + let fut = async move { service.get_dag_state().await }.map_err(map_err); + + Box::pin(fut.boxed()) + } } fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> { diff --git a/rpc/server/src/module/pubsub/tests.rs b/rpc/server/src/module/pubsub/tests.rs index fc5d74cc7d..a1cfa655d4 100644 --- a/rpc/server/src/module/pubsub/tests.rs +++ b/rpc/server/src/module/pubsub/tests.rs @@ -34,11 +34,12 @@ pub async fn test_subscribe_to_events() -> Result<()> { starcoin_logger::init_for_test(); // prepare - let (_txpool_service, storage, config, _, registry) = + let (_txpool_service, storage, config, _, registry, dag) = test_helper::start_txpool_with_miner(1000, true).await; let startup_info = storage.get_startup_info()?.unwrap(); let net = config.net(); - let mut block_chain = BlockChain::new(net.time_service(), startup_info.main, storage, None)?; + let mut block_chain = + BlockChain::new(net.time_service(), startup_info.main, storage, None, dag)?; let miner_account = AccountInfo::random(); let pri_key = Ed25519PrivateKey::genesis(); @@ -61,6 +62,7 @@ pub async fn test_subscribe_to_events() -> Result<()> { vec![txn.clone()], vec![], None, + None, )?; debug!("block_template: gas_used: {}", block_template.gas_used); let new_block = block_chain @@ -109,7 +111,9 @@ pub async fn test_subscribe_to_events() -> Result<()> { // send block let block_detail = Arc::new(executed_block); - bus.broadcast(NewHeadBlock(block_detail))?; + bus.broadcast(NewHeadBlock { + executed_block: block_detail.clone(), + })?; let mut receiver = receiver; @@ -133,7 +137,7 @@ pub async fn test_subscribe_to_events() -> Result<()> { #[stest::test] pub async fn test_subscribe_to_pending_transactions() -> Result<()> { // given - let (txpool_service, _, config, _, registry) = + let (txpool_service, _, config, _, registry, _dag) = test_helper::start_txpool_with_miner(1000, true).await; let service = registry .register_by_factory::() @@ -193,7 +197,8 @@ pub async fn test_subscribe_to_pending_transactions() -> Result<()> { #[stest::test] pub async fn test_subscribe_to_mint_block() -> Result<()> { - let (_txpool_service, .., registry) = test_helper::start_txpool_with_miner(1000, true).await; + let (_txpool_service, .., registry, _dag) = + test_helper::start_txpool_with_miner(1000, true).await; let bus = registry.service_ref::().await?; let service = registry .register_by_factory::() diff --git a/scripts/release.sh 
b/scripts/release.sh index 4c00db7e72..4f6cdfd880 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -2,13 +2,6 @@ rm -rf starcoin-artifacts/* mkdir -p starcoin-artifacts/ cp -v target/release/starcoin starcoin-artifacts/ -cp -v target/release/starcoin_miner starcoin-artifacts/ -cp -v target/release/starcoin_generator starcoin-artifacts/ -cp -v target/release/mpm starcoin-artifacts/ -cp -v target/release/starcoin_db_exporter starcoin-artifacts/ -cp -v scripts/import_block.sh starcoin-artifacts/ -cp -v scripts/import_snapshot.sh starcoin-artifacts/ -cp -v scripts/verify_header.sh starcoin-artifacts/ cp -v README.md starcoin-artifacts/ if [ "$1" == "windows-latest" ]; then 7z a -r starcoin-$1.zip starcoin-artifacts diff --git a/scripts/shell.nix b/scripts/shell.nix new file mode 100644 index 0000000000..83273b9a06 --- /dev/null +++ b/scripts/shell.nix @@ -0,0 +1,9 @@ +{ pkgs ? import <nixpkgs> {} }: + +pkgs.mkShell { + buildInputs = [ + pkgs.openssl + pkgs.pkg-config + pkgs.protobuf + ]; +} diff --git a/state/service/src/service.rs b/state/service/src/service.rs index f54738a1e8..42106f9470 100644 --- a/state/service/src/service.rs +++ b/state/service/src/service.rs @@ -131,9 +131,7 @@ impl ServiceHandler for ChainStateService { fn handle_event(&mut self, msg: NewHeadBlock, _ctx: &mut ServiceContext) { - let NewHeadBlock(block) = msg; - - let state_root = block.header().state_root(); + let state_root = msg.executed_block.header().state_root(); debug!("ChainStateActor change StateRoot to : {:?}", state_root); self.service.change_root(state_root); } @@ -269,13 +267,13 @@ mod tests { use starcoin_config::NodeConfig; use starcoin_service_registry::{RegistryAsyncService, RegistryService}; use starcoin_state_api::ChainStateAsyncService; - use starcoin_types::account_config::genesis_address; + use starcoin_types::{account_config::genesis_address, block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH}; #[stest::test] async fn test_actor_launch() -> Result<()> { let config = Arc::new(NodeConfig::random_for_test()); - let (storage, _startup_info, _) = - test_helper::Genesis::init_storage_for_test(config.net())?; + let (storage, _startup_info, _, _) = + test_helper::Genesis::init_storage_for_test(config.net(), TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH)?; let registry = RegistryService::launch(); registry.put_shared(config).await?; registry.put_shared(storage).await?; diff --git a/storage/src/batch/mod.rs b/storage/src/batch/mod.rs index 60e463274e..562ed71ae1 100644 --- a/storage/src/batch/mod.rs +++ b/storage/src/batch/mod.rs @@ -5,29 +5,31 @@ use crate::storage::{CodecWriteBatch, KeyCodec, ValueCodec, WriteOp}; use anyhow::Result; use std::convert::TryFrom; +pub type WriteBatch = GWriteBatch<Vec<u8>, Vec<u8>>; + #[derive(Debug, Default, Clone)] -pub struct WriteBatch { - pub rows: Vec<(Vec<u8>, WriteOp<Vec<u8>>)>, +pub struct GWriteBatch<K: Default, V: Default> { + pub rows: Vec<(K, WriteOp<V>)>, } -impl WriteBatch { +impl<K: Default, V: Default> GWriteBatch<K, V> { /// Creates an empty batch. pub fn new() -> Self { Self::default() } - pub fn new_with_rows(rows: Vec<(Vec<u8>, WriteOp<Vec<u8>>)>) -> Self { + pub fn new_with_rows(rows: Vec<(K, WriteOp<V>)>) -> Self { Self { rows } } /// Adds an insert/update operation to the batch. - pub fn put(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<()> { + pub fn put(&mut self, key: K, value: V) -> Result<()> { self.rows.push((key, WriteOp::Value(value))); Ok(()) } /// Adds a delete operation to the batch.
- pub fn delete(&mut self, key: Vec) -> Result<()> { + pub fn delete(&mut self, key: K) -> Result<()> { self.rows.push((key, WriteOp::Deletion)); Ok(()) } diff --git a/storage/src/block/mod.rs b/storage/src/block/mod.rs index 9b2f162ba6..5549f16825 100644 --- a/storage/src/block/mod.rs +++ b/storage/src/block/mod.rs @@ -1,10 +1,14 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::define_storage; -use crate::storage::{CodecKVStore, StorageInstance, ValueCodec}; use crate::{ - BLOCK_BODY_PREFIX_NAME, BLOCK_HEADER_PREFIX_NAME, BLOCK_PREFIX_NAME, - BLOCK_TRANSACTIONS_PREFIX_NAME, BLOCK_TRANSACTION_INFOS_PREFIX_NAME, FAILED_BLOCK_PREFIX_NAME, + define_storage, + storage::{ + CodecKVStore, CodecWriteBatch, ColumnFamily, KeyCodec, SchemaStorage, StorageInstance, + ValueCodec, + }, + BLOCK_BODY_PREFIX_NAME, BLOCK_HEADER_PREFIX_NAME, BLOCK_HEADER_PREFIX_NAME_V2, + BLOCK_PREFIX_NAME, BLOCK_PREFIX_NAME_V2, BLOCK_TRANSACTIONS_PREFIX_NAME, + BLOCK_TRANSACTION_INFOS_PREFIX_NAME, FAILED_BLOCK_PREFIX_NAME, FAILED_BLOCK_PREFIX_NAME_V2, }; use anyhow::{bail, Result}; use bcs_ext::{BCSCodec, Sample}; @@ -12,7 +16,7 @@ use network_p2p_types::peer_id::PeerId; use serde::{Deserialize, Serialize}; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_types::block::{Block, BlockBody, BlockHeader}; +use starcoin_types::block::{Block, BlockBody, BlockHeader, LegacyBlock, LegacyBlockHeader}; #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct OldFailedBlock { @@ -46,6 +50,37 @@ pub struct FailedBlock { version: String, } +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename(deserialize = "FailedBlock"))] +pub struct OldFailedBlockV2 { + block: LegacyBlock, + peer_id: Option, + failed: String, + version: String, +} + +impl From for FailedBlock { + fn from(value: OldFailedBlockV2) -> Self { + Self { + block: value.block.into(), + peer_id: value.peer_id, + failed: value.failed, + version: value.version, + } + } +} + +impl From for OldFailedBlockV2 { + fn from(value: FailedBlock) -> Self { + Self { + block: value.block.into(), + peer_id: value.peer_id, + failed: value.failed, + version: value.version, + } + } +} + #[allow(clippy::from_over_into)] impl Into<(Block, Option, String, String)> for FailedBlock { fn into(self) -> (Block, Option, String, String) { @@ -75,19 +110,44 @@ impl Sample for FailedBlock { } } -define_storage!(BlockInnerStorage, HashValue, Block, BLOCK_PREFIX_NAME); +impl FailedBlock { + pub fn random() -> Self { + Self { + block: Block::random(), + peer_id: Some(PeerId::random()), + failed: "Unknown reason".to_string(), + version: "Unknown version".to_string(), + } + } +} + +define_storage!(BlockInnerStorage, HashValue, Block, BLOCK_PREFIX_NAME_V2); define_storage!( BlockHeaderStorage, HashValue, BlockHeader, + BLOCK_HEADER_PREFIX_NAME_V2 +); +define_storage!( + OldBlockInnerStorage, + HashValue, + LegacyBlock, + BLOCK_PREFIX_NAME +); +define_storage!( + OldBlockHeaderStorage, + HashValue, + LegacyBlockHeader, BLOCK_HEADER_PREFIX_NAME ); + define_storage!( BlockBodyStorage, HashValue, BlockBody, BLOCK_BODY_PREFIX_NAME ); + define_storage!( BlockTransactionsStorage, HashValue, @@ -100,10 +160,18 @@ define_storage!( Vec, BLOCK_TRANSACTION_INFOS_PREFIX_NAME ); + define_storage!( FailedBlockStorage, HashValue, FailedBlock, + FAILED_BLOCK_PREFIX_NAME_V2 +); + +define_storage!( + OldFailedBlockStorage, + HashValue, + OldFailedBlockV2, FAILED_BLOCK_PREFIX_NAME ); @@ -137,6 +205,36 @@ impl 
ValueCodec for BlockHeader { } } +impl ValueCodec for LegacyBlock { + fn encode_value(&self) -> Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> Result { + Self::decode(data) + } +} + +impl ValueCodec for LegacyBlockHeader { + fn encode_value(&self) -> Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> Result { + Self::decode(data) + } +} + +impl ValueCodec for Vec { + fn encode_value(&self) -> Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> Result { + Self::decode(data) + } +} + impl ValueCodec for BlockBody { fn encode_value(&self) -> Result> { self.encode() @@ -166,6 +264,16 @@ impl ValueCodec for FailedBlock { } } +impl ValueCodec for OldFailedBlockV2 { + fn encode_value(&self) -> Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> Result { + Self::decode(data) + } +} + impl BlockStorage { pub fn new(instance: StorageInstance) -> Self { BlockStorage { @@ -314,4 +422,88 @@ impl BlockStorage { self.failed_block_storage .put_raw(block_id, old_block.encode_value()?) } + + fn upgrade_store( + old_store: T1, + store: T2, + batch_size: usize, + ) -> Result + where + K: KeyCodec + Copy, + V1: ValueCodec + Into, + V2: ValueCodec, + T1: SchemaStorage + ColumnFamily, + T2: SchemaStorage + ColumnFamily, + { + let mut total_size: usize = 0; + let mut old_iter = old_store.iter()?; + old_iter.seek_to_first(); + + let mut to_delete = Some(CodecWriteBatch::new()); + let mut to_put = Some(CodecWriteBatch::new()); + let mut item_count = 0; + + for item in old_iter { + let (id, old_block) = item?; + let block: V2 = old_block.into(); + to_delete + .as_mut() + .unwrap() + .delete(id) + .expect("should never fail"); + to_put + .as_mut() + .unwrap() + .put(id, block) + .expect("should never fail"); + + item_count += 1; + if item_count == batch_size { + total_size = total_size.saturating_add(item_count); + item_count = 0; + old_store + .write_batch(to_delete.take().unwrap()) + .expect("should never fail"); + store + .write_batch(to_put.take().unwrap()) + .expect("should never fail"); + + to_delete = Some(CodecWriteBatch::new()); + to_put = Some(CodecWriteBatch::new()); + } + } + if item_count != 0 { + total_size = total_size.saturating_add(item_count); + old_store + .write_batch(to_delete.take().unwrap()) + .expect("should never fail"); + store + .write_batch(to_put.take().unwrap()) + .expect("should never fail"); + } + + Ok(total_size) + } + + pub fn upgrade_block_header(instance: StorageInstance) -> Result<()> { + const BATCH_SIZE: usize = 1000usize; + + let old_header_store = OldBlockHeaderStorage::new(instance.clone()); + let header_store = BlockHeaderStorage::new(instance.clone()); + let total_size = Self::upgrade_store(old_header_store, header_store, BATCH_SIZE)?; + info!("upgraded {total_size} block headers"); + + let old_block_store = OldBlockInnerStorage::new(instance.clone()); + let block_store = BlockInnerStorage::new(instance.clone()); + let total_blocks = Self::upgrade_store(old_block_store, block_store, BATCH_SIZE)?; + info!("upgraded {total_blocks} blocks"); + + let old_failed_block_store = OldFailedBlockStorage::new(instance.clone()); + let failed_block_store = FailedBlockStorage::new(instance); + let total_failed_blocks = + Self::upgrade_store(old_failed_block_store, failed_block_store, BATCH_SIZE)?; + info!("upgraded {total_failed_blocks} failed_blocks"); + + Ok(()) + } } diff --git a/storage/src/cache_storage/mod.rs b/storage/src/cache_storage/mod.rs index 46001ba401..596fbd181d 100644 --- a/storage/src/cache_storage/mod.rs +++ 
b/storage/src/cache_storage/mod.rs @@ -1,34 +1,44 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::batch::WriteBatch; -use crate::metrics::{record_metrics, StorageMetrics}; -use crate::storage::{InnerStore, WriteOp}; +use crate::batch::GWriteBatch; +use crate::{ + batch::WriteBatch, + metrics::{record_metrics, StorageMetrics}, + storage::{InnerStore, WriteOp}, +}; use anyhow::{Error, Result}; +use core::hash::Hash; use lru::LruCache; use parking_lot::Mutex; use starcoin_config::DEFAULT_CACHE_SIZE; -pub struct CacheStorage { - cache: Mutex, Vec>>, + +pub type CacheStorage = GCacheStorage, Vec>; + +pub struct GCacheStorage { + cache: Mutex>, metrics: Option, } -impl CacheStorage { +impl GCacheStorage { pub fn new(metrics: Option) -> Self { - CacheStorage { - cache: Mutex::new(LruCache::new(DEFAULT_CACHE_SIZE)), + GCacheStorage { + cache: Mutex::new(LruCache::::new(DEFAULT_CACHE_SIZE)), metrics, } } pub fn new_with_capacity(size: usize, metrics: Option) -> Self { - CacheStorage { - cache: Mutex::new(LruCache::new(size)), + GCacheStorage { + cache: Mutex::new(LruCache::::new(size)), metrics, } } + pub fn remove_all(&self) { + self.cache.lock().clear(); + } } -impl Default for CacheStorage { +impl Default for GCacheStorage { fn default() -> Self { Self::new(None) } @@ -36,53 +46,47 @@ impl Default for CacheStorage { impl InnerStore for CacheStorage { fn get(&self, prefix_name: &str, key: Vec) -> Result>> { - record_metrics("cache", prefix_name, "get", self.metrics.as_ref()).call(|| { - Ok(self - .cache - .lock() - .get(&compose_key(prefix_name.to_string(), key)) - .cloned()) - }) + let composed_key = compose_key(Some(prefix_name), key); + record_metrics("cache", prefix_name, "get", self.metrics.as_ref()) + .call(|| Ok(self.get_inner(&composed_key))) } fn put(&self, prefix_name: &str, key: Vec, value: Vec) -> Result<()> { // remove record_metrics for performance // record_metrics add in write_batch to reduce Instant::now system call - let mut cache = self.cache.lock(); - cache.put(compose_key(prefix_name.to_string(), key), value); + let composed_key = compose_key(Some(prefix_name), key); + let len = self.put_inner(composed_key, value); if let Some(metrics) = self.metrics.as_ref() { - metrics.cache_items.set(cache.len() as u64); + metrics.cache_items.set(len as u64); } Ok(()) } fn contains_key(&self, prefix_name: &str, key: Vec) -> Result { - record_metrics("cache", prefix_name, "contains_key", self.metrics.as_ref()).call(|| { - Ok(self - .cache - .lock() - .contains(&compose_key(prefix_name.to_string(), key))) - }) + let composed_key = compose_key(Some(prefix_name), key); + record_metrics("cache", prefix_name, "contains_key", self.metrics.as_ref()) + .call(|| Ok(self.contains_key_inner(&composed_key))) } fn remove(&self, prefix_name: &str, key: Vec) -> Result<()> { // remove record_metrics for performance // record_metrics add in write_batch to reduce Instant::now system call - let mut cache = self.cache.lock(); - cache.pop(&compose_key(prefix_name.to_string(), key)); + let composed_key = compose_key(Some(prefix_name), key); + let len = self.remove_inner(&composed_key); if let Some(metrics) = self.metrics.as_ref() { - metrics.cache_items.set(cache.len() as u64); + metrics.cache_items.set(len as u64); } Ok(()) } fn write_batch(&self, prefix_name: &str, batch: WriteBatch) -> Result<()> { + let rows = batch + .rows + .into_iter() + .map(|(k, v)| (compose_key(Some(prefix_name), k), v)) + .collect(); + let batch = WriteBatch { rows }; 
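The compose_key calls above (the helper itself is defined just below) namespace every key with its column-family name because the rewritten cache keeps all column families in one shared LRU; without the prefix, identical raw keys from different families would collide. A small illustration, assuming an lru crate version whose constructor still takes a plain usize capacity:

    use lru::LruCache;

    fn compose_key(prefix: Option<&str>, key: &[u8]) -> Vec<u8> {
        match prefix {
            Some(p) => {
                let mut composed = Vec::with_capacity(p.len() + key.len());
                composed.extend_from_slice(p.as_bytes());
                composed.extend_from_slice(key);
                composed
            }
            None => key.to_vec(),
        }
    }

    fn main() {
        let mut cache: LruCache<Vec<u8>, Vec<u8>> = LruCache::new(16);
        // Same raw key under two prefixes -> two distinct entries.
        cache.put(compose_key(Some("block"), b"k1"), b"a".to_vec());
        cache.put(compose_key(Some("block_header"), b"k1"), b"b".to_vec());
        assert_eq!(
            cache.get(&compose_key(Some("block"), b"k1")),
            Some(&b"a".to_vec())
        );
    }

Note that the concatenation is not length-delimited, so in principle a prefix/key pair could alias another; in practice the keys are fixed-width hashes, which rules that out.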
record_metrics("cache", prefix_name, "write_batch", self.metrics.as_ref()).call(|| { - for (key, write_op) in &batch.rows { - match write_op { - WriteOp::Value(value) => self.put(prefix_name, key.to_vec(), value.to_vec())?, - WriteOp::Deletion => self.remove(prefix_name, key.to_vec())?, - }; - } + self.write_batch_inner(batch); Ok(()) }) } @@ -108,22 +112,76 @@ impl InnerStore for CacheStorage { } fn multi_get(&self, prefix_name: &str, keys: Vec>) -> Result>>> { + let composed_keys = keys + .into_iter() + .map(|k| compose_key(Some(prefix_name), k)) + .collect::>(); + Ok(self.multi_get_inner(composed_keys.as_slice())) + } +} + +fn compose_key(prefix_name: Option<&str>, source_key: Vec) -> Vec { + match prefix_name { + Some(prefix_name) => { + let temp_vec = prefix_name.as_bytes().to_vec(); + let mut compose = Vec::with_capacity(temp_vec.len() + source_key.len()); + compose.extend(temp_vec); + compose.extend(source_key); + compose + } + None => source_key, + } +} + +impl GCacheStorage { + pub fn get_inner(&self, key: &K) -> Option { + self.cache.lock().get(key).cloned() + } + + pub fn put_inner(&self, key: K, value: V) -> usize { + let mut cache = self.cache.lock(); + cache.put(key, value); + cache.len() + } + + pub fn contains_key_inner(&self, key: &K) -> bool { + self.cache.lock().contains(key) + } + + pub fn remove_inner(&self, key: &K) -> usize { + let mut cache = self.cache.lock(); + cache.pop(key); + cache.len() + } + + pub fn write_batch_inner(&self, batch: GWriteBatch) { + for (key, write_op) in batch.rows { + match write_op { + WriteOp::Value(value) => { + self.put_inner(key, value); + } + WriteOp::Deletion => { + self.remove_inner(&key); + } + }; + } + } + + pub fn put_sync_inner(&self, key: K, value: V) -> usize { + self.put_inner(key, value) + } + + pub fn write_batch_sync_inner(&self, batch: GWriteBatch) { + self.write_batch_inner(batch) + } + + pub fn multi_get_inner(&self, keys: &[K]) -> Vec> { let mut cache = self.cache.lock(); let mut result = vec![]; - for key in keys.into_iter() { - let item = cache - .get(&compose_key(prefix_name.to_string(), key)) - .cloned(); + for key in keys { + let item = cache.get(key).cloned(); result.push(item); } - Ok(result) + result } } - -fn compose_key(prefix_name: String, source_key: Vec) -> Vec { - let temp_vec = prefix_name.as_bytes().to_vec(); - let mut compose = Vec::with_capacity(temp_vec.len() + source_key.len()); - compose.extend(temp_vec); - compose.extend(source_key); - compose -} diff --git a/storage/src/chain_info/mod.rs b/storage/src/chain_info/mod.rs index 3f193be3f0..0a258d7823 100644 --- a/storage/src/chain_info/mod.rs +++ b/storage/src/chain_info/mod.rs @@ -4,6 +4,7 @@ use crate::storage::{ColumnFamily, InnerStorage, KVStore}; use crate::{StorageVersion, CHAIN_INFO_PREFIX_NAME}; use anyhow::Result; +use bcs_ext::BCSCodec; use starcoin_crypto::HashValue; use starcoin_types::startup_info::{BarnardHardFork, SnapshotRange, StartupInfo}; use std::convert::{TryFrom, TryInto}; diff --git a/storage/src/db_storage/mod.rs b/storage/src/db_storage/mod.rs index 20e6f82dbc..e80a870544 100644 --- a/storage/src/db_storage/mod.rs +++ b/storage/src/db_storage/mod.rs @@ -1,18 +1,20 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::batch::WriteBatch; -use crate::errors::StorageInitError; -use crate::metrics::{record_metrics, StorageMetrics}; -use crate::storage::{ColumnFamilyName, InnerStore, KeyCodec, ValueCodec, WriteOp}; -use crate::{StorageVersion, DEFAULT_PREFIX_NAME}; +use crate::{ + 
batch::WriteBatch, + errors::StorageInitError, + metrics::{record_metrics, StorageMetrics}, + storage::{ColumnFamilyName, InnerStore, KeyCodec, RawDBStorage, ValueCodec, WriteOp}, + StorageVersion, DEFAULT_PREFIX_NAME, +}; use anyhow::{ensure, format_err, Error, Result}; -use rocksdb::{Options, ReadOptions, WriteBatch as DBWriteBatch, WriteOptions, DB}; +use rocksdb::{ + DBIterator, DBPinnableSlice, IteratorMode, Options, ReadOptions, WriteBatch as DBWriteBatch, + WriteOptions, DB, +}; use starcoin_config::{check_open_fds_limit, RocksdbConfig}; -use std::collections::HashSet; -use std::iter; -use std::marker::PhantomData; -use std::path::Path; +use std::{collections::HashSet, iter, marker::PhantomData, path::Path}; const RES_FDS: u64 = 4096; @@ -213,6 +215,9 @@ impl DBStorage { // write buffer size db_opts.set_max_write_buffer_number(5); db_opts.set_max_background_jobs(5); + if config.parallelism > 1 { + db_opts.increase_parallelism(config.parallelism as i32); + } // cache // let cache = Cache::new_lru_cache(2 * 1024 * 1024 * 1024); // db_opts.set_row_cache(&cache.unwrap()); @@ -235,6 +240,16 @@ impl DBStorage { )) } + pub fn raw_iterator_cf_opt( + &self, + prefix_name: &str, + mode: IteratorMode, + readopts: ReadOptions, + ) -> Result { + let cf_handle = self.get_cf_handle(prefix_name)?; + Ok(self.db.iterator_cf_opt(cf_handle, readopts, mode)) + } + /// Returns a forward [`SchemaIterator`] on a certain schema. pub fn iter(&self, prefix_name: &str) -> Result> where @@ -460,3 +475,22 @@ impl InnerStore for DBStorage { }) } } + +impl RawDBStorage for DBStorage { + fn raw_get_pinned_cf>( + &self, + prefix: &str, + key: K, + ) -> Result> { + let cf = self.get_cf_handle(prefix)?; + let res = self + .db + .get_pinned_cf_opt(cf, key, &ReadOptions::default())?; + Ok(res) + } + + fn raw_write_batch(&self, batch: DBWriteBatch) -> Result<()> { + self.db.write(batch)?; + Ok(()) + } +} diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 0246b6e7f4..6ed9a685fa 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -21,6 +21,7 @@ use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::AccumulatorTreeStore; use starcoin_crypto::HashValue; use starcoin_state_store_api::{StateNode, StateNodeStore}; +use starcoin_types::block::BlockNumber; use starcoin_types::contract_event::ContractEvent; use starcoin_types::startup_info::{ChainInfo, ChainStatus, SnapshotRange}; use starcoin_types::transaction::{RichTransactionInfo, Transaction}; @@ -72,12 +73,16 @@ pub const STATE_NODE_PREFIX_NAME: ColumnFamilyName = "state_node"; pub const STATE_NODE_PREFIX_NAME_PREV: ColumnFamilyName = "state_node_prev"; pub const CHAIN_INFO_PREFIX_NAME: ColumnFamilyName = "chain_info"; pub const TRANSACTION_PREFIX_NAME: ColumnFamilyName = "transaction"; +pub const TRANSACTION_PREFIX_NAME_V2: ColumnFamilyName = "transaction_v2"; pub const TRANSACTION_INFO_PREFIX_NAME: ColumnFamilyName = "transaction_info"; pub const TRANSACTION_INFO_PREFIX_NAME_V2: ColumnFamilyName = "transaction_info_v2"; pub const TRANSACTION_INFO_HASH_PREFIX_NAME: ColumnFamilyName = "transaction_info_hash"; pub const CONTRACT_EVENT_PREFIX_NAME: ColumnFamilyName = "contract_event"; pub const FAILED_BLOCK_PREFIX_NAME: ColumnFamilyName = "failed_block"; pub const TABLE_INFO_PREFIX_NAME: ColumnFamilyName = "table_info"; +pub const BLOCK_PREFIX_NAME_V2: ColumnFamilyName = "block_v2"; +pub const BLOCK_HEADER_PREFIX_NAME_V2: ColumnFamilyName = "block_header_v2"; +pub const FAILED_BLOCK_PREFIX_NAME_V2: ColumnFamilyName = "failed_block_v2"; 
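The *_V2 column families above are the targets of the batched re-encode migration shown in storage/src/block/mod.rs (and generalized in storage/src/upgrade.rs below): rows stream out of the old column family, are converted to the new schema, and are flushed in fixed-size delete/put batches so the upgrade never holds an entire table in memory. A simplified, self-contained sketch of that copy-and-delete loop, with BTreeMap standing in for the real SchemaStorage stores:

    use std::collections::BTreeMap;

    fn upgrade_store<K: Ord + Clone, V1: Into<V2>, V2>(
        old: &mut BTreeMap<K, V1>,
        new: &mut BTreeMap<K, V2>,
        batch_size: usize,
    ) -> usize {
        let mut migrated = 0;
        loop {
            // Take up to `batch_size` keys per iteration (one "write batch").
            let keys: Vec<K> = old.keys().take(batch_size).cloned().collect();
            if keys.is_empty() {
                break;
            }
            for k in keys {
                if let Some(v1) = old.remove(&k) {
                    new.insert(k, v1.into()); // V1 -> V2 conversion
                    migrated += 1;
                }
            }
        }
        migrated
    }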
///db storage use prefix_name vec to init /// Please note that adding a prefix needs to be added in vec simultaneously, remember!! @@ -143,17 +148,44 @@ static VEC_PREFIX_NAME_V3: Lazy> = Lazy::new(|| { TABLE_INFO_PREFIX_NAME, ] }); +static VEC_PREFIX_NAME_V4: Lazy> = Lazy::new(|| { + vec![ + BLOCK_ACCUMULATOR_NODE_PREFIX_NAME, + TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME, + BLOCK_PREFIX_NAME, + BLOCK_HEADER_PREFIX_NAME, + BLOCK_PREFIX_NAME_V2, + BLOCK_HEADER_PREFIX_NAME_V2, + BLOCK_BODY_PREFIX_NAME, // unused column + BLOCK_INFO_PREFIX_NAME, + BLOCK_TRANSACTIONS_PREFIX_NAME, + BLOCK_TRANSACTION_INFOS_PREFIX_NAME, + STATE_NODE_PREFIX_NAME, + CHAIN_INFO_PREFIX_NAME, + TRANSACTION_PREFIX_NAME, + TRANSACTION_INFO_PREFIX_NAME, // unused column + TRANSACTION_INFO_PREFIX_NAME_V2, + TRANSACTION_INFO_HASH_PREFIX_NAME, + CONTRACT_EVENT_PREFIX_NAME, + FAILED_BLOCK_PREFIX_NAME, + FAILED_BLOCK_PREFIX_NAME_V2, + TRANSACTION_PREFIX_NAME_V2, + TABLE_INFO_PREFIX_NAME, + ] +}); + #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, IntoPrimitive, TryFromPrimitive)] #[repr(u8)] pub enum StorageVersion { V1 = 1, V2 = 2, V3 = 3, + V4 = 4, } impl StorageVersion { pub fn current_version() -> StorageVersion { - StorageVersion::V3 + StorageVersion::V4 } pub fn get_column_family_names(&self) -> &'static [ColumnFamilyName] { @@ -161,6 +193,7 @@ impl StorageVersion { StorageVersion::V1 => &VEC_PREFIX_NAME_V1, StorageVersion::V2 => &VEC_PREFIX_NAME_V2, StorageVersion::V3 => &VEC_PREFIX_NAME_V3, + StorageVersion::V4 => &VEC_PREFIX_NAME_V4, } } } @@ -241,6 +274,7 @@ pub trait BlockTransactionInfoStore { ids: Vec, ) -> Result>>; } + pub trait ContractEventStore { /// Save events by key `txn_info_id`. /// As txn_info has accumulator root of events, so there is a one-to-one mapping. @@ -338,6 +372,7 @@ impl Display for Storage { write!(f, "{}", self.clone()) } } + impl Debug for Storage { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "{}", self) diff --git a/storage/src/storage.rs b/storage/src/storage.rs index cddd7269b1..7cc4fe1abe 100644 --- a/storage/src/storage.rs +++ b/storage/src/storage.rs @@ -2,19 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 pub use crate::batch::WriteBatch; -use crate::cache_storage::CacheStorage; -use crate::db_storage::{DBStorage, SchemaIterator}; -use crate::upgrade::DBUpgrade; +use crate::{ + cache_storage::CacheStorage, + db_storage::{DBStorage, SchemaIterator}, + upgrade::DBUpgrade, +}; use anyhow::{bail, format_err, Result}; use byteorder::{BigEndian, ReadBytesExt}; +use rocksdb::{DBPinnableSlice, WriteBatch as DBWriteBatch}; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_logger::prelude::info; use starcoin_vm_types::state_store::table::TableHandle; -use std::convert::TryInto; -use std::fmt::Debug; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{convert::TryInto, fmt::Debug, marker::PhantomData, sync::Arc}; /// Type alias to improve readability. 
pub type ColumnFamilyName = &'static str; @@ -46,6 +46,16 @@ pub trait InnerStore: Send + Sync { fn multi_get(&self, prefix_name: &str, keys: Vec>) -> Result>>>; } +pub trait RawDBStorage: Send + Sync { + fn raw_get_pinned_cf>( + &self, + prefix: &str, + key: K, + ) -> Result>; + + fn raw_write_batch(&self, batch: DBWriteBatch) -> Result<()>; +} + ///Storage instance type define #[derive(Clone)] #[allow(clippy::upper_case_acronyms)] diff --git a/storage/src/tests/test_block.rs b/storage/src/tests/test_block.rs index 4e663c57b7..0024af03de 100644 --- a/storage/src/tests/test_block.rs +++ b/storage/src/tests/test_block.rs @@ -43,6 +43,7 @@ fn test_block() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); storage .block_storage @@ -102,6 +103,7 @@ fn test_block_number() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); storage .block_storage @@ -149,6 +151,7 @@ fn test_old_failed_block_decode() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); let block_body = BlockBody::new(vec![SignedUserTransaction::mock()], None); @@ -185,6 +188,7 @@ fn test_save_failed_block() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); let block_body = BlockBody::new(vec![SignedUserTransaction::mock()], None); diff --git a/storage/src/tests/test_storage.rs b/storage/src/tests/test_storage.rs index be7a2eaa44..3343aee407 100644 --- a/storage/src/tests/test_storage.rs +++ b/storage/src/tests/test_storage.rs @@ -3,10 +3,15 @@ extern crate chrono; +use crate::block::{ + FailedBlock, OldBlockHeaderStorage, OldBlockInnerStorage, OldFailedBlockStorage, + OldFailedBlockV2, +}; use crate::cache_storage::CacheStorage; use crate::db_storage::DBStorage; use crate::storage::{CodecKVStore, InnerStore, StorageInstance, ValueCodec}; use crate::table_info::TableInfoStore; +use crate::transaction::LegacyTransactionStorage; use crate::transaction_info::{BlockTransactionInfo, OldTransactionInfoStorage}; use crate::{ BlockInfoStore, BlockStore, BlockTransactionInfoStore, Storage, @@ -18,15 +23,18 @@ use anyhow::Result; use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_config::RocksdbConfig; use starcoin_crypto::HashValue; -use starcoin_types::{ - account_address::AccountAddress, - block::{Block, BlockBody, BlockHeader, BlockInfo}, - language_storage::TypeTag, - startup_info::SnapshotRange, - transaction::{RichTransactionInfo, SignedUserTransaction, Transaction, TransactionInfo}, - vm_error::KeptVMStatus, +use starcoin_logger::prelude::info; +use starcoin_types::block::{Block, BlockBody, BlockHeader, BlockInfo}; +use starcoin_types::startup_info::SnapshotRange; +use starcoin_types::transaction::{ + RichTransactionInfo, SignedUserTransaction, Transaction, TransactionInfo, }; +use starcoin_types::vm_error::KeptVMStatus; +use starcoin_vm_types::account_address::AccountAddress; +use starcoin_vm_types::block_metadata::LegacyBlockMetadata; +use starcoin_vm_types::language_storage::TypeTag; use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; +use starcoin_vm_types::transaction::LegacyTransaction; use std::path::Path; #[test] @@ -281,13 +289,53 @@ fn test_missing_key_handle() -> Result<()> { Ok(()) } -fn generate_old_db(path: &Path) -> Result> { +fn generate_old_block_data(instance: StorageInstance) -> Result<(Vec, Vec)> { + const BLOCK_COUNT: u64 = 1001; + let old_block_header_storage = OldBlockHeaderStorage::new(instance.clone()); + let old_block_storage = OldBlockInnerStorage::new(instance.clone()); + let old_failed_block_storage = 
OldFailedBlockStorage::new(instance); + + let failed_block_ids = (0..BLOCK_COUNT) + .map(|_| { + let failed_block = FailedBlock::random(); + let failed_block_id = { + let (block, _, _, _) = failed_block.clone().into(); + block.id() + }; + let old_failed_block: OldFailedBlockV2 = failed_block.into(); + old_failed_block_storage + .put(failed_block_id, old_failed_block) + .unwrap(); + failed_block_id + }) + .collect::>(); + + let block_ids = (0..BLOCK_COUNT) + .map(|_| { + let block = Block::random(); + let block_id = block.id(); + let old_block = block.clone().into(); + let old_block_header = block.header.into(); + + old_block_storage.put(block_id, old_block).unwrap(); + old_block_header_storage + .put(block_id, old_block_header) + .unwrap(); + block_id + }) + .collect::>(); + + Ok((block_ids, failed_block_ids)) +} + +fn generate_old_db(path: &Path) -> Result<(Vec, Vec, Vec)> { let instance = StorageInstance::new_cache_and_db_instance( CacheStorage::new(None), DBStorage::new(path, RocksdbConfig::default(), None)?, ); let storage = Storage::new(instance.clone())?; - let old_transaction_info_storage = OldTransactionInfoStorage::new(instance); + let old_transaction_info_storage = OldTransactionInfoStorage::new(instance.clone()); + let old_transaction_storage = LegacyTransactionStorage::new(instance.clone()); let block_header = BlockHeader::random(); let txn = SignedUserTransaction::mock(); @@ -296,7 +344,8 @@ fn generate_old_db(path: &Path) -> Result> { BlockBody::new(vec![txn.clone()], None), ); let mut txn_inf_ids = vec![]; - let block_metadata = block.to_metadata(0); + let mut txn_ids = vec![]; + let block_metadata: LegacyBlockMetadata = block.to_metadata(0).try_into().unwrap(); let txn_info_0 = TransactionInfo::new( block_metadata.id(), HashValue::random(), @@ -304,9 +353,9 @@ fn generate_old_db(path: &Path) -> Result> { 0, KeptVMStatus::Executed, ); - storage - .transaction_storage - .save_transaction(Transaction::BlockMetadata(block_metadata))?; + let txn_0 = LegacyTransaction::BlockMetadata(block_metadata); + txn_ids.push(txn_0.id()); + old_transaction_storage.save_transaction(txn_0)?; txn_inf_ids.push(txn_info_0.id()); let txn_info_1 = TransactionInfo::new( txn.id(), @@ -315,6 +364,9 @@ fn generate_old_db(path: &Path) -> Result> { 100, KeptVMStatus::Executed, ); + let txn_1 = LegacyTransaction::UserTransaction(txn); + txn_ids.push(txn_1.id()); + old_transaction_storage.save_transaction(txn_1)?; txn_inf_ids.push(txn_info_1.id()); let block_info = BlockInfo::new( block_header.id(), @@ -322,9 +374,6 @@ fn generate_old_db(path: &Path) -> Result> { AccumulatorInfo::new(HashValue::random(), vec![], 2, 3), AccumulatorInfo::new(HashValue::random(), vec![], 1, 1), ); - storage - .transaction_storage - .save_transaction(Transaction::UserTransaction(txn))?; storage.commit_block(block)?; storage.save_block_info(block_info)?; @@ -343,13 +392,16 @@ fn generate_old_db(path: &Path) -> Result> { }, )?; - Ok(txn_inf_ids) + let (block_ids, failed_block_ids) = generate_old_block_data(instance)?; + + Ok((txn_inf_ids, block_ids, failed_block_ids)) } #[stest::test] pub fn test_db_upgrade() -> Result<()> { let tmpdir = starcoin_config::temp_dir(); - let txn_info_ids = generate_old_db(tmpdir.path())?; + let (txn_info_ids, block_ids, failed_block_ids) = generate_old_db(tmpdir.path())?; + info!("Upgrade blocks:{},{:?}", block_ids.len(), block_ids); let mut instance = StorageInstance::new_cache_and_db_instance( CacheStorage::new(None), DBStorage::new(tmpdir.path(), RocksdbConfig::default(), None)?, @@ -357,6 
+409,9 @@ pub fn test_db_upgrade() -> Result<()> { instance.check_upgrade()?; let storage = Storage::new(instance.clone())?; + let old_block_header_storage = OldBlockHeaderStorage::new(instance.clone()); + let old_block_storage = OldBlockInnerStorage::new(instance.clone()); + let old_failed_block_storage = OldFailedBlockStorage::new(instance.clone()); let old_transaction_info_storage = OldTransactionInfoStorage::new(instance); for txn_info_id in txn_info_ids { @@ -369,6 +424,38 @@ pub fn test_db_upgrade() -> Result<()> { "expect RichTransactionInfo is some" ); } + + for block_id in block_ids { + assert!( + old_block_header_storage.get(block_id)?.is_none(), + "expect OldBlockHeader is none" + ); + assert!( + storage.get_block_header_by_hash(block_id)?.is_some(), + "expect BlockHeader is some" + ); + + assert!( + old_block_storage.get(block_id)?.is_none(), + "expect OldBlock is none" + ); + assert!( + storage.get_block_by_hash(block_id)?.is_some(), + "expect Block is some" + ); + } + + for failed_block_id in failed_block_ids { + assert!( + old_failed_block_storage.get(failed_block_id)?.is_none(), + "expect OldBlock is none" + ); + assert!( + storage.get_failed_block_by_id(failed_block_id)?.is_some(), + "expect Block is some" + ); + } + Ok(()) } diff --git a/storage/src/transaction/legacy.rs b/storage/src/transaction/legacy.rs new file mode 100644 index 0000000000..7066d13d38 --- /dev/null +++ b/storage/src/transaction/legacy.rs @@ -0,0 +1,35 @@ +use crate::storage::{CodecKVStore, ValueCodec}; +use crate::{define_storage, TRANSACTION_PREFIX_NAME}; +use bcs_ext::BCSCodec; +use starcoin_crypto::HashValue; +use starcoin_vm_types::transaction::LegacyTransaction; + +define_storage!( + LegacyTransactionStorage, + HashValue, + LegacyTransaction, + TRANSACTION_PREFIX_NAME +); + +impl ValueCodec for LegacyTransaction { + fn encode_value(&self) -> anyhow::Result> { + self.encode() + } + + fn decode_value(data: &[u8]) -> anyhow::Result { + Self::decode(data) + } +} + +impl LegacyTransactionStorage { + pub fn get_transaction( + &self, + txn_hash: HashValue, + ) -> anyhow::Result> { + self.get(txn_hash) + } + + pub fn save_transaction(&self, txn_info: LegacyTransaction) -> anyhow::Result<()> { + self.put(txn_info.id(), txn_info) + } +} diff --git a/storage/src/transaction/mod.rs b/storage/src/transaction/mod.rs index ffbb7f2302..af33e0e934 100644 --- a/storage/src/transaction/mod.rs +++ b/storage/src/transaction/mod.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use crate::storage::{CodecKVStore, CodecWriteBatch, ValueCodec}; -use crate::TRANSACTION_PREFIX_NAME; +use crate::{TRANSACTION_PREFIX_NAME, TRANSACTION_PREFIX_NAME_V2}; use crate::{define_storage, TransactionStore}; use anyhow::Result; use bcs_ext::BCSCodec; +pub use legacy::LegacyTransactionStorage; use starcoin_crypto::HashValue; use starcoin_types::transaction::Transaction; @@ -13,7 +14,7 @@ define_storage!( TransactionStorage, HashValue, Transaction, - TRANSACTION_PREFIX_NAME + TRANSACTION_PREFIX_NAME_V2 ); impl ValueCodec for Transaction { @@ -46,5 +47,6 @@ impl TransactionStore for TransactionStorage { } } +mod legacy; #[cfg(test)] mod test; diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index b8fcd18b43..9e3e9bf24d 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -1,15 +1,19 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::block::BlockStorage; +use crate::block::{ + BlockHeaderStorage, BlockInnerStorage, BlockStorage, FailedBlockStorage, 
OldBlockHeaderStorage, + OldBlockInnerStorage, OldFailedBlockStorage, +}; use crate::block_info::BlockInfoStorage; use crate::chain_info::ChainInfoStorage; -use crate::transaction::TransactionStorage; +use crate::storage::{CodecWriteBatch, ColumnFamily, KeyCodec, SchemaStorage, ValueCodec}; +use crate::transaction::{LegacyTransactionStorage, TransactionStorage}; use crate::transaction_info::OldTransactionInfoStorage; use crate::transaction_info::TransactionInfoStorage; use crate::{ - CodecKVStore, RichTransactionInfo, StorageInstance, StorageVersion, TransactionStore, - BLOCK_BODY_PREFIX_NAME, TRANSACTION_INFO_PREFIX_NAME, + CodecKVStore, RichTransactionInfo, StorageInstance, StorageVersion, BLOCK_BODY_PREFIX_NAME, + TRANSACTION_INFO_PREFIX_NAME, }; use anyhow::{bail, ensure, format_err, Result}; use once_cell::sync::Lazy; @@ -17,7 +21,7 @@ use starcoin_crypto::HashValue; use starcoin_logger::prelude::{debug, info, warn}; use starcoin_types::block::BlockNumber; use starcoin_types::startup_info::{BarnardHardFork, StartupInfo}; -use starcoin_types::transaction::Transaction; +use starcoin_vm_types::transaction::LegacyTransaction; use std::cmp::Ordering; pub struct DBUpgrade; @@ -63,7 +67,8 @@ impl DBUpgrade { let block_storage = BlockStorage::new(instance.clone()); let block_info_storage = BlockInfoStorage::new(instance.clone()); let transaction_info_storage = TransactionInfoStorage::new(instance.clone()); - let transaction_storage = TransactionStorage::new(instance.clone()); + // Use old store here, TransactionStorage is using different column family now + let transaction_storage = LegacyTransactionStorage::new(instance.clone()); let mut iter = old_transaction_info_storage.iter()?; iter.seek_to_first(); let mut processed_count = 0; @@ -114,12 +119,12 @@ impl DBUpgrade { })?; if transaction_index == 0 { ensure!( - matches!(transaction, Transaction::BlockMetadata(_)), + matches!(transaction, LegacyTransaction::BlockMetadata(_)), "transaction_index 0 must been BlockMetadata transaction, but got txn: {:?}, block:{:?}", transaction, block ); } else { ensure!( - matches!(transaction, Transaction::UserTransaction(_)), + matches!(transaction, LegacyTransaction::UserTransaction(_)), "transaction_index > 0 must been UserTransaction transaction, but got txn: {:?}, block:{:?}", transaction, block ); } @@ -163,6 +168,13 @@ impl DBUpgrade { Ok(()) } + fn db_upgrade_v3_v4(instance: &mut StorageInstance) -> Result<()> { + upgrade_block_header(instance.clone())?; + upgrade_transaction(instance.clone())?; + + Ok(()) + } + pub fn do_upgrade( version_in_db: StorageVersion, version_in_code: StorageVersion, @@ -185,6 +197,18 @@ impl DBUpgrade { (StorageVersion::V2, StorageVersion::V3) => { Self::db_upgrade_v2_v3(instance)?; } + (StorageVersion::V1, StorageVersion::V4) => { + Self::db_upgrade_v1_v2(instance)?; + Self::db_upgrade_v2_v3(instance)?; + Self::db_upgrade_v3_v4(instance)?; + } + (StorageVersion::V2, StorageVersion::V4) => { + Self::db_upgrade_v2_v3(instance)?; + Self::db_upgrade_v3_v4(instance)?; + } + (StorageVersion::V3, StorageVersion::V4) => { + Self::db_upgrade_v3_v4(instance)?; + } _ => bail!( "Can not upgrade db from {:?} to {:?}", version_in_db, @@ -234,3 +258,94 @@ impl DBUpgrade { Ok(()) } } + +fn upgrade_store(old_store: T1, store: T2, batch_size: usize) -> Result +where + K: KeyCodec + Copy, + V1: ValueCodec + Into, + V2: ValueCodec, + T1: SchemaStorage + ColumnFamily, + T2: SchemaStorage + ColumnFamily, +{ + let mut total_size: usize = 0; + let mut old_iter = old_store.iter()?; + 
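// This loop streams every (key, value) pair out of the old column family, converts
// V1 -> V2 via the Into bound, and flushes paired delete/put write batches every
// batch_size items to bound memory; upgrade_store returns the number of migrated
// records.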
old_iter.seek_to_first(); + + let mut to_delete = Some(CodecWriteBatch::new()); + let mut to_put = Some(CodecWriteBatch::new()); + let mut item_count = 0; + + for item in old_iter { + let (id, old_block) = item?; + let block: V2 = old_block.into(); + to_delete + .as_mut() + .unwrap() + .delete(id) + .expect("should never fail"); + to_put + .as_mut() + .unwrap() + .put(id, block) + .expect("should never fail"); + + item_count += 1; + if item_count == batch_size { + total_size = total_size.saturating_add(item_count); + item_count = 0; + old_store + .write_batch(to_delete.take().unwrap()) + .expect("should never fail"); + store + .write_batch(to_put.take().unwrap()) + .expect("should never fail"); + + to_delete = Some(CodecWriteBatch::new()); + to_put = Some(CodecWriteBatch::new()); + } + } + if item_count != 0 { + total_size = total_size.saturating_add(item_count); + old_store + .write_batch(to_delete.take().unwrap()) + .expect("should never fail"); + store + .write_batch(to_put.take().unwrap()) + .expect("should never fail"); + } + + Ok(total_size) +} + +fn upgrade_block_header(instance: StorageInstance) -> Result<()> { + const BATCH_SIZE: usize = 1000usize; + + let old_header_store = OldBlockHeaderStorage::new(instance.clone()); + let header_store = BlockHeaderStorage::new(instance.clone()); + let total_size = upgrade_store(old_header_store, header_store, BATCH_SIZE)?; + info!("upgraded {total_size} block headers"); + + let old_block_store = OldBlockInnerStorage::new(instance.clone()); + let block_store = BlockInnerStorage::new(instance.clone()); + let total_blocks = upgrade_store(old_block_store, block_store, BATCH_SIZE)?; + info!("upgraded {total_blocks} blocks"); + + let old_failed_block_store = OldFailedBlockStorage::new(instance.clone()); + let failed_block_store = FailedBlockStorage::new(instance); + let total_failed_blocks = + upgrade_store(old_failed_block_store, failed_block_store, BATCH_SIZE)?; + info!("upgraded {total_failed_blocks} failed_blocks"); + + Ok(()) +} + +fn upgrade_transaction(instance: StorageInstance) -> Result<()> { + const BATCH_SIZE: usize = 1000usize; + + let old_txn_store = LegacyTransactionStorage::new(instance.clone()); + let txn_store = TransactionStorage::new(instance); + let total_size = upgrade_store(old_txn_store, txn_store, BATCH_SIZE)?; + info!("upgraded {total_size} Transactions"); + + Ok(()) +} diff --git a/sync/Cargo.toml b/sync/Cargo.toml index e0e763bf53..e4236c1089 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -42,6 +42,11 @@ stest = { workspace = true } stream-task = { workspace = true } sysinfo = { workspace = true } thiserror = { workspace = true } +starcoin-consensus = { workspace = true } +timeout-join-handler = { workspace = true } +starcoin-flexidag = { workspace = true } +starcoin-dag = { workspace = true } +starcoin-chain-mock = { workspace = true } [dev-dependencies] hex = { workspace = true } @@ -57,6 +62,7 @@ starcoin-txpool-mock-service = { workspace = true } starcoin-executor = { workspace = true } test-helper = { workspace = true } tokio = { features = ["full"], workspace = true } +starcoin-genesis = { workspace = true } [package] authors = { workspace = true } diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index d35d9e4757..481c555ddf 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -1,13 +1,23 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: 
Apache-2.0 +#[cfg(test)] +use super::CheckBlockConnectorHashValue; +#[cfg(test)] +use super::CreateBlockRequest; +#[cfg(test)] +use super::CreateBlockResponse; use crate::block_connector::{ExecuteRequest, ResetRequest, WriteBlockChainService}; use crate::sync::{CheckSyncEvent, SyncService}; -use crate::tasks::{BlockConnectedEvent, BlockDiskCheckEvent}; -use anyhow::{format_err, Result}; +use crate::tasks::{BlockConnectedEvent, BlockConnectedFinishEvent, BlockDiskCheckEvent}; +#[cfg(test)] +use anyhow::bail; +use anyhow::{format_err, Ok, Result}; use network_api::PeerProvider; -use starcoin_chain_api::{ConnectBlockError, WriteableChainService}; +use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService}; use starcoin_config::{NodeConfig, G_CRATE_VERSION}; +use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_network::NetworkServiceRef; @@ -17,6 +27,9 @@ use starcoin_service_registry::{ use starcoin_storage::{BlockStore, Storage}; use starcoin_sync_api::PeerNewBlock; use starcoin_txpool::TxPoolService; +use starcoin_txpool_api::TxPoolSyncService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::block::ExecutedBlock; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::{MinedBlock, SyncStatusChangeEvent, SystemShutdown}; @@ -26,15 +39,21 @@ use sysinfo::{DiskExt, System, SystemExt}; const DISK_CHECKPOINT_FOR_PANIC: u64 = 1024 * 1024 * 1024 * 3; const DISK_CHECKPOINT_FOR_WARN: u64 = 1024 * 1024 * 1024 * 5; -pub struct BlockConnectorService { - chain_service: WriteBlockChainService, +pub struct BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + chain_service: WriteBlockChainService, sync_status: Option, config: Arc, } -impl BlockConnectorService { +impl BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ pub fn new( - chain_service: WriteBlockChainService, + chain_service: WriteBlockChainService, config: Arc, ) -> Self { Self { @@ -51,6 +70,10 @@ impl BlockConnectorService { } } + pub fn chain_head_id(&self) -> HashValue { + self.chain_service.get_main().status().head.id() + } + pub fn check_disk_space(&mut self) -> Option> { if System::IS_SUPPORTED { let mut sys = System::new_all(); @@ -97,16 +120,23 @@ impl BlockConnectorService { } } -impl ServiceFactory for BlockConnectorService { - fn create(ctx: &mut ServiceContext) -> Result { +impl ServiceFactory + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn create( + ctx: &mut ServiceContext>, + ) -> Result> { let config = ctx.get_shared::>()?; let bus = ctx.bus_ref().clone(); - let txpool = ctx.get_shared::()?; + let txpool = ctx.get_shared::()?; let storage = ctx.get_shared::>()?; let startup_info = storage .get_startup_info()? .ok_or_else(|| format_err!("Startup info should exist."))?; let vm_metrics = ctx.get_shared_opt::()?; + let dag = ctx.get_shared::()?; let chain_service = WriteBlockChainService::new( config.clone(), startup_info, @@ -114,13 +144,17 @@ impl ServiceFactory for BlockConnectorService { txpool, bus, vm_metrics, + dag, )?; Ok(Self::new(chain_service, config)) } } -impl ActorService for BlockConnectorService { +impl ActorService for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn started(&mut self, ctx: &mut ServiceContext) -> Result<()> { //TODO figure out a more suitable value. 
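// Bounding the mailbox caps how many messages may be buffered for this service at
// once; 1024 is a heuristic, hence the TODO above.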
ctx.set_mailbox_capacity(1024); @@ -141,15 +175,19 @@ impl ActorService for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event( &mut self, _: BlockDiskCheckEvent, - ctx: &mut ServiceContext, + ctx: &mut ServiceContext>, ) { if let Some(res) = self.check_disk_space() { match res { - Ok(available_space) => { + std::result::Result::Ok(available_space) => { warn!("Available diskspace only {}/GB left ", available_space) } Err(e) => { @@ -161,30 +199,75 @@ impl EventHandler for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler for BlockConnectorService { fn handle_event( &mut self, msg: BlockConnectedEvent, - _ctx: &mut ServiceContext, + ctx: &mut ServiceContext>, ) { //because this block has execute at sync task, so just try connect to select head chain. //TODO refactor connect and execute + let block = msg.block; + let feedback = msg.feedback; + + match msg.action { + crate::tasks::BlockConnectAction::ConnectNewBlock => { + if let Err(e) = self.chain_service.try_connect(block) { + error!("Process connected new block from sync error: {:?}", e); + } + } + crate::tasks::BlockConnectAction::ConnectExecutedBlock => { + if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) { + error!("Process connected executed block from sync error: {:?}", e); + } + } + } + feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent)); + } +} + +#[cfg(test)] +impl EventHandler for BlockConnectorService { + fn handle_event( + &mut self, + msg: BlockConnectedEvent, + ctx: &mut ServiceContext>, + ) { + //because this block has execute at sync task, so just try connect to select head chain. 
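// Unlike the production handler above, this test-only handler routes
// ConnectNewBlock through apply_failed(), which executes and stores the block
// without connecting it, so the connect step can be exercised explicitly later.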
+ //TODO refactor connect and execute let block = msg.block; - if let Err(e) = self.chain_service.try_connect(block) { - error!("Process connected block error: {:?}", e); + let feedback = msg.feedback; + + match msg.action { + crate::tasks::BlockConnectAction::ConnectNewBlock => { + if let Err(e) = self.chain_service.apply_failed(block) { + error!("Process connected new block from sync error: {:?}", e); + } + } + crate::tasks::BlockConnectAction::ConnectExecutedBlock => { + if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) { + error!("Process connected executed block from sync error: {:?}", e); + } + } } + + feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent)); } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event(&mut self, msg: MinedBlock, _ctx: &mut ServiceContext) { let MinedBlock(new_block) = msg; let id = new_block.header().id(); debug!("try connect mined block: {}", id); match self.chain_service.try_connect(new_block.as_ref().clone()) { - Ok(_) => debug!("Process mined block {} success.", id), + std::result::Result::Ok(()) => debug!("Process mined block {} success.", id), Err(e) => { warn!("Process mined block {} fail, error: {:?}", id, e); } @@ -192,13 +275,21 @@ impl EventHandler for BlockConnectorService { } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event(&mut self, msg: SyncStatusChangeEvent, _ctx: &mut ServiceContext) { self.sync_status = Some(msg.0); } } -impl EventHandler for BlockConnectorService { +impl EventHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle_event(&mut self, msg: PeerNewBlock, ctx: &mut ServiceContext) { if !self.is_synced() { debug!("[connector] Ignore PeerNewBlock event because the node has not been synchronized yet."); @@ -207,11 +298,13 @@ impl EventHandler for BlockConnectorService { let peer_id = msg.get_peer_id(); if let Err(e) = self.chain_service.try_connect(msg.get_block().clone()) { match e.downcast::() { - Ok(connect_error) => { + std::result::Result::Ok(connect_error) => { match connect_error { ConnectBlockError::FutureBlock(block) => { //TODO cache future block - if let Ok(sync_service) = ctx.service_ref::() { + if let std::result::Result::Ok(sync_service) = + ctx.service_ref::() + { info!( "BlockConnector try connect future block ({:?},{}), peer_id:{:?}, notify Sync service check sync.", block.id(), @@ -257,22 +350,77 @@ impl EventHandler for BlockConnectorService { } } -impl ServiceHandler for BlockConnectorService { +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle( &mut self, msg: ResetRequest, - _ctx: &mut ServiceContext, + _ctx: &mut ServiceContext>, ) -> Result<()> { self.chain_service.reset(msg.block_hash) } } -impl ServiceHandler for BlockConnectorService { +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ fn handle( &mut self, msg: ExecuteRequest, - _ctx: &mut ServiceContext, + _ctx: &mut ServiceContext>, ) -> Result { self.chain_service.execute(msg.block) } } + +#[cfg(test)] +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn handle( + &mut self, + msg: CreateBlockRequest, + _ctx: 
&mut ServiceContext, + ) -> ::Response { + for _i in 0..msg.count { + let block = self.chain_service.create_block( + msg.author, + msg.parent_hash, + msg.user_txns.clone(), + msg.uncles.clone(), + msg.block_gas_limit, + msg.tips.clone(), + )?; + self.chain_service.try_connect(block)?; + } + Ok(CreateBlockResponse) + } +} + +#[cfg(test)] +impl ServiceHandler + for BlockConnectorService +where + TransactionPoolServiceT: TxPoolSyncService + 'static, +{ + fn handle( + &mut self, + msg: CheckBlockConnectorHashValue, + _ctx: &mut ServiceContext>, + ) -> Result<()> { + if self.chain_service.get_main().status().head().id() == msg.head_hash { + info!("the branch in chain service is the same as target's branch"); + Ok(()) + } else { + info!("mock branch in chain service is not the same as target's branch"); + bail!("blockchain in chain service is not the same as target!"); + } + } +} diff --git a/sync/src/block_connector/mod.rs b/sync/src/block_connector/mod.rs index 05b7cfd2b2..6f726c3e85 100644 --- a/sync/src/block_connector/mod.rs +++ b/sync/src/block_connector/mod.rs @@ -11,9 +11,17 @@ mod metrics; mod test_illegal_block; #[cfg(test)] mod test_write_block_chain; +#[cfg(test)] +mod test_write_dag_block_chain; mod write_block_chain; pub use block_connector_service::BlockConnectorService; +#[cfg(test)] +use starcoin_types::block::BlockHeader; +#[cfg(test)] +use starcoin_types::transaction::SignedUserTransaction; +#[cfg(test)] +use starcoin_vm_types::account_address::AccountAddress; pub use write_block_chain::WriteBlockChainService; #[cfg(test)] @@ -40,3 +48,36 @@ pub struct ExecuteRequest { impl ServiceRequest for ExecuteRequest { type Response = anyhow::Result; } + +#[cfg(test)] +#[derive(Clone, Debug)] +pub struct CreateBlockRequest { + pub count: u64, + pub author: AccountAddress, + pub parent_hash: Option, + pub user_txns: Vec, + pub uncles: Vec, + pub block_gas_limit: Option, + pub tips: Option>, +} + +#[cfg(test)] +#[derive(Clone, Debug)] +pub struct CreateBlockResponse; + +#[cfg(test)] +impl ServiceRequest for CreateBlockRequest { + type Response = anyhow::Result; +} + +#[cfg(test)] +#[derive(Debug, Clone)] +pub struct CheckBlockConnectorHashValue { + pub head_hash: HashValue, + pub number: u64, +} + +#[cfg(test)] +impl ServiceRequest for CheckBlockConnectorHashValue { + type Response = anyhow::Result<()>; +} diff --git a/sync/src/block_connector/test_illegal_block.rs b/sync/src/block_connector/test_illegal_block.rs index ec2b662895..9956f6dace 100644 --- a/sync/src/block_connector/test_illegal_block.rs +++ b/sync/src/block_connector/test_illegal_block.rs @@ -50,7 +50,8 @@ async fn new_block_and_main() -> (Block, BlockChain) { .get_main() .current_header() .id(); - let main = BlockChain::new(net.time_service(), head_id, storage, None).unwrap(); + let dag = writeable_block_chain_service.get_main().dag(); + let main = BlockChain::new(net.time_service(), head_id, storage, None, dag).unwrap(); let new_block = new_block( None, &mut writeable_block_chain_service, @@ -86,10 +87,18 @@ async fn uncle_block_and_writeable_block_chain( .unwrap() .unwrap() .id(); - - let new_branch = BlockChain::new(net.time_service(), tmp_head, storage.clone(), None).unwrap(); + let dag = writeable_block_chain_service.get_main().dag(); + let new_branch = + BlockChain::new(net.time_service(), tmp_head, storage.clone(), None, dag).unwrap(); let (block_template, _) = new_branch - .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) + .create_block_template( + *miner_account.address(), + None, + 
Vec::new(), + vec![], + None, + None, + ) .unwrap(); let new_block = writeable_block_chain_service .get_main() @@ -114,7 +123,14 @@ fn apply_with_illegal_uncle( let miner_account = AccountInfo::random(); let (block_template, _) = writeable_block_chain_service .get_main() - .create_block_template(*miner_account.address(), None, Vec::new(), uncles, None)?; + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + uncles, + None, + None, + )?; let consensus_strategy = writeable_block_chain_service.get_main().consensus(); let new_block = consensus_strategy.create_block(block_template, net.time_service().as_ref())?; @@ -122,7 +138,8 @@ fn apply_with_illegal_uncle( .get_main() .current_header() .id(); - let mut main = BlockChain::new(net.time_service(), head_id, storage, None)?; + let dag = writeable_block_chain_service.get_main().dag(); + let mut main = BlockChain::new(net.time_service(), head_id, storage, None, dag)?; main.apply(new_block.clone())?; Ok(new_block) } @@ -135,7 +152,14 @@ fn apply_legal_block( let miner_account = AccountInfo::random(); let (block_template, _) = writeable_block_chain_service .get_main() - .create_block_template(*miner_account.address(), None, Vec::new(), uncles, None) + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + uncles, + None, + None, + ) .unwrap(); let new_block = consensus_strategy .create_block( @@ -247,7 +271,6 @@ async fn test_verify_timestamp_failed() { error!("apply failed : {:?}", apply_err); } } - async fn test_verify_future_timestamp(succ: bool) -> Result<()> { let (mut new_block, mut main) = new_block_and_main().await; if !succ { @@ -360,12 +383,20 @@ async fn test_verify_can_not_be_uncle_check_ancestor_failed() { .unwrap() .unwrap() .id(); + let dag = writeable_block_chain_service.get_main().dag(); let mut new_branch = - BlockChain::new(net.time_service(), tmp_head, storage.clone(), None).unwrap(); + BlockChain::new(net.time_service(), tmp_head, storage.clone(), None, dag).unwrap(); for _i in 0..2 { let (block_template, _) = new_branch - .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + vec![], + None, + None, + ) .unwrap(); let new_block = new_branch .consensus() @@ -432,7 +463,8 @@ async fn test_verify_illegal_uncle_consensus(succ: bool) -> Result<()> { genesis_config.consensus_config.strategy = ConsensusStrategy::CryptoNight.value(); let net = ChainNetwork::new_custom("block_test".to_string(), ChainId::new(100), genesis_config)?; - let mut mock_chain = MockChain::new(net.clone()).unwrap(); + let mut mock_chain = + MockChain::new(net.clone()).unwrap(); let mut times = 3; mock_chain.produce_and_apply_times(times).unwrap(); @@ -445,7 +477,7 @@ async fn test_verify_illegal_uncle_consensus(succ: bool) -> Result<()> { let fork_block_chain = mock_chain.fork_new_branch(Some(fork_id)).unwrap(); let miner = mock_chain.miner(); let (block_template, _) = fork_block_chain - .create_block_template(*miner.address(), None, Vec::new(), Vec::new(), None) + .create_block_template(*miner.address(), None, Vec::new(), Vec::new(), None, None) .unwrap(); let uncle_block = fork_block_chain .consensus() @@ -461,7 +493,7 @@ async fn test_verify_illegal_uncle_consensus(succ: bool) -> Result<()> { let uncles = vec![uncle_block_header]; let mut main_block_chain = mock_chain.fork_new_branch(None).unwrap(); let (block_template, _) = main_block_chain - .create_block_template(*miner.address(), None, Vec::new(), uncles, 
None) + .create_block_template(*miner.address(), None, Vec::new(), uncles, None, None) .unwrap(); let new_block = main_block_chain .consensus() @@ -760,6 +792,7 @@ async fn test_verify_uncles_uncle_exist_failed() { Vec::new(), uncles.clone(), None, + None, ) .unwrap(); let new_block = writeable_block_chain_service @@ -830,7 +863,14 @@ async fn test_verify_uncle_and_parent_number_failed() { let miner_account = AccountInfo::random(); let (block_template, _) = writeable_block_chain_service .get_main() - .create_block_template(*miner_account.address(), None, Vec::new(), Vec::new(), None) + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + Vec::new(), + None, + None, + ) .unwrap(); let new_block = writeable_block_chain_service .get_main() diff --git a/sync/src/block_connector/test_write_block_chain.rs b/sync/src/block_connector/test_write_block_chain.rs index c94ebe91b9..47c473441b 100644 --- a/sync/src/block_connector/test_write_block_chain.rs +++ b/sync/src/block_connector/test_write_block_chain.rs @@ -1,6 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::integer_arithmetic)] + use crate::block_connector::WriteBlockChainService; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader}; @@ -13,7 +14,7 @@ use starcoin_service_registry::{RegistryAsyncService, RegistryService}; use starcoin_storage::Store; use starcoin_time_service::TimeService; use starcoin_txpool_mock_service::MockTxPoolService; -use starcoin_types::block::Block; +use starcoin_types::block::{Block, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH}; use starcoin_types::startup_info::StartupInfo; use std::sync::Arc; @@ -25,8 +26,11 @@ pub async fn create_writeable_block_chain() -> ( let node_config = NodeConfig::random_for_test(); let node_config = Arc::new(node_config); - let (storage, chain_info, _) = StarcoinGenesis::init_storage_for_test(node_config.net()) - .expect("init storage by genesis fail."); + let (storage, chain_info, _, dag) = StarcoinGenesis::init_storage_for_test( + node_config.net(), + TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH, + ) + .expect("init storage by genesis fail."); let registry = RegistryService::launch(); let bus = registry.service_ref::().await.unwrap(); let txpool_service = MockTxPoolService::new(); @@ -38,6 +42,7 @@ pub async fn create_writeable_block_chain() -> ( txpool_service, bus, None, + dag, ) .unwrap(), node_config, @@ -75,7 +80,7 @@ pub fn new_block( let miner_address = *miner.address(); let block_chain = writeable_block_chain_service.get_main(); let (block_template, _) = block_chain - .create_block_template(miner_address, None, Vec::new(), vec![], None) + .create_block_template(miner_address, None, Vec::new(), vec![], None, None) .unwrap(); block_chain .consensus() @@ -108,6 +113,7 @@ fn gen_fork_block_chain( times: u64, writeable_block_chain_service: &mut WriteBlockChainService, ) { + let dag = writeable_block_chain_service.get_main().dag(); let miner_account = AccountInfo::random(); if let Some(block_header) = writeable_block_chain_service .get_main() @@ -122,10 +128,18 @@ fn gen_fork_block_chain( parent_id, writeable_block_chain_service.get_main().get_storage(), None, + dag.clone(), ) .unwrap(); let (block_template, _) = block_chain - .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + vec![], + None, + None, + ) .unwrap(); let block = block_chain .consensus() diff --git 
a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs new file mode 100644 index 0000000000..b797b0d86c --- /dev/null +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -0,0 +1,227 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::integer_arithmetic)] +use crate::block_connector::test_write_block_chain::create_writeable_block_chain; +use crate::block_connector::WriteBlockChainService; +use starcoin_account_api::AccountInfo; +use starcoin_chain::{BlockChain, ChainReader}; +use starcoin_chain_service::WriteableChainService; +use starcoin_config::NodeConfig; +use starcoin_consensus::Consensus; +use starcoin_crypto::HashValue; +use starcoin_time_service::TimeService; +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::Block; +use std::sync::Arc; + +pub fn gen_dag_blocks( + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Option { + let miner_account = AccountInfo::random(); + let mut last_block_hash = None; + if times > 0 { + for i in 0..times { + let block = new_dag_block( + Some(&miner_account), + writeable_block_chain_service, + time_service, + ); + last_block_hash = Some(block.id()); + let e = writeable_block_chain_service.try_connect(block); + println!("try_connect result: {:?}", e); + assert!(e.is_ok()); + if (i + 1) % 3 == 0 { + writeable_block_chain_service.time_sleep(5000000); + } + } + last_block_hash + } else { + None + } + + // match result { + // super::write_block_chain::ConnectOk::Duplicate(block) + // | super::write_block_chain::ConnectOk::ExeConnectMain(block) + // | super::write_block_chain::ConnectOk::ExeConnectBranch(block) + // | super::write_block_chain::ConnectOk::Connect(block) => Some(block.header().id()), + // super::write_block_chain::ConnectOk::DagConnected + // | super::write_block_chain::ConnectOk::MainDuplicate + // | super::write_block_chain::ConnectOk::DagPending + // | super::write_block_chain::ConnectOk::DagConnectMissingBlock => { + // unreachable!("should not reach here, result: {:?}", result); + // } + // } +} + +pub fn new_dag_block( + miner_account: Option<&AccountInfo>, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Block { + let miner = match miner_account { + Some(m) => m.clone(), + None => AccountInfo::random(), + }; + let miner_address = *miner.address(); + let block_chain = writeable_block_chain_service.get_main(); + let current_header = block_chain.current_header(); + let (_dag_genesis, tips) = block_chain + .current_tips_hash(¤t_header) + .expect("failed to get tips") + .expect("failed to get the tip and dag genesis"); + let (block_template, _) = block_chain + .create_block_template( + miner_address, + Some(current_header.id()), + Vec::new(), + vec![], + None, + Some(tips), + ) + .unwrap(); + block_chain + .consensus() + .create_block(block_template, time_service) + .unwrap() +} + +#[stest::test] +async fn test_dag_block_chain_apply() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let last_header_id = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_header_id.unwrap() + ); + println!("finish test_block_chain_apply"); +} + 
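// A minimal usage sketch for the helpers above; the test name is hypothetical and
// the flow mirrors test_dag_block_chain_apply.
#[stest::test]
async fn example_three_dag_blocks_extend_main() {
    let (mut writeable_block_chain_service, node_config, _) =
        create_writeable_block_chain().await;
    let net = node_config.net();
    // produce and connect three DAG blocks, then confirm the last one became the head
    let head = gen_dag_blocks(
        3,
        &mut writeable_block_chain_service,
        net.time_service().as_ref(),
    );
    assert_eq!(
        writeable_block_chain_service
            .get_main()
            .current_header()
            .id(),
        head.unwrap()
    );
}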
+fn gen_fork_dag_block_chain( + fork_number: u64, + node_config: Arc, + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, +) -> Option { + let miner_account = AccountInfo::random(); + let dag = writeable_block_chain_service.get_dag(); + if let Some(block_header) = writeable_block_chain_service + .get_main() + .get_header_by_number(fork_number) + .unwrap() + { + let mut parent_id = block_header.id(); + let net = node_config.net(); + for _i in 0..times { + let block_chain = BlockChain::new( + net.time_service(), + parent_id, + writeable_block_chain_service.get_main().get_storage(), + None, + dag.clone(), + ) + .unwrap(); + let (block_template, _) = block_chain + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + vec![], + None, + None, + ) + .unwrap(); + let block = block_chain + .consensus() + .create_block(block_template, net.time_service().as_ref()) + .unwrap(); + parent_id = block.id(); + + writeable_block_chain_service.try_connect(block).unwrap(); + } + Some(parent_id) + } else { + None + } +} + +#[stest::test(timeout = 120)] +async fn test_block_chain_switch_main() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let mut last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + + last_block = gen_fork_dag_block_chain( + 0, + node_config, + 2 * times, + &mut writeable_block_chain_service, + ); + + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); +} + +#[stest::test] +async fn test_block_chain_reset() -> anyhow::Result<()> { + let times = 10; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + let block = writeable_block_chain_service + .get_main() + .get_block_by_number(3)? + .unwrap(); + writeable_block_chain_service.reset(block.id())?; + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .number(), + 3 + ); + + assert!(writeable_block_chain_service + .get_main() + .get_block_by_number(2)? 
+ .is_some()); + Ok(()) +} diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index c22ff42408..18c3b28918 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -2,15 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 use crate::block_connector::metrics::ChainMetrics; -use anyhow::{format_err, Result}; +use anyhow::{format_err, Ok, Result}; use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, WriteableChainService}; use starcoin_config::NodeConfig; +#[cfg(test)] +use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_service_registry::bus::{Bus, BusService}; -use starcoin_service_registry::ServiceRef; +use starcoin_service_registry::{ServiceContext, ServiceRef}; use starcoin_storage::Store; use starcoin_txpool_api::TxPoolSyncService; use starcoin_types::block::BlockInfo; @@ -19,8 +22,11 @@ use starcoin_types::{ startup_info::StartupInfo, system_events::{NewBranch, NewHeadBlock}, }; -use std::fmt::Formatter; -use std::sync::Arc; +#[cfg(test)] +use starcoin_vm_types::{account_address::AccountAddress, transaction::SignedUserTransaction}; +use std::{fmt::Formatter, sync::Arc}; + +use super::BlockConnectorService; const MAX_ROLL_BACK_BLOCK: usize = 10; @@ -36,6 +42,7 @@ where bus: ServiceRef, metrics: Option, vm_metrics: Option, + dag: BlockDAG, } #[derive(Copy, Clone, Debug)] @@ -75,7 +82,7 @@ where if let Some(metrics) = self.metrics.as_ref() { let result = match result.as_ref() { - Ok(connect) => format!("Ok_{}", connect), + std::result::Result::Ok(connect) => format!("Ok_{}", connect), Err(err) => { if let Some(connect_err) = err.downcast_ref::() { format!("Err_{}", connect_err.reason()) @@ -93,17 +100,18 @@ where } } -impl
<P> WriteBlockChainService<P>
+impl WriteBlockChainService where - P: TxPoolSyncService + 'static, + TransactionPoolServiceT: TxPoolSyncService + 'static, { pub fn new( config: Arc, startup_info: StartupInfo, storage: Arc, - txpool: P, + txpool: TransactionPoolServiceT, bus: ServiceRef, vm_metrics: Option, + dag: BlockDAG, ) -> Result { let net = config.net(); let main = BlockChain::new( @@ -111,6 +119,7 @@ where startup_info.main, storage.clone(), vm_metrics.clone(), + dag.clone(), )?; let metrics = config .metrics @@ -126,6 +135,7 @@ where bus, metrics, vm_metrics, + dag, }) } @@ -145,6 +155,7 @@ where block_id, self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?) } } else if self.block_exist(header.parent_hash())? { @@ -154,6 +165,7 @@ where header.parent_hash(), self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?) } else { None @@ -169,6 +181,88 @@ where &self.main } + pub fn get_dag(&self) -> BlockDAG { + self.dag.clone() + } + + #[cfg(test)] + pub fn create_block( + &self, + author: AccountAddress, + parent_hash: Option, + user_txns: Vec, + uncles: Vec, + block_gas_limit: Option, + tips: Option>, + ) -> Result { + let (block_template, _transactions) = self.main.create_block_template( + author, + parent_hash, + user_txns, + uncles, + block_gas_limit, + tips, + )?; + Ok(self + .main + .consensus() + .create_block(block_template, self.main.time_service().as_ref()) + .unwrap()) + } + + #[cfg(test)] + pub fn time_sleep(&self, millis: u64) { + self.config.net().time_service().sleep(millis); + } + + #[cfg(test)] + pub fn apply_failed(&mut self, block: Block) -> Result<()> { + use starcoin_chain::verifier::FullVerifier; + + // apply but no connection + let verified_block = self.main.verify_with_verifier::(block)?; + let executed_block = self.main.execute(verified_block)?; + let enacted_blocks = vec![executed_block.block().clone()]; + self.do_new_head(executed_block, 1, enacted_blocks, 0, vec![])?; + // bail!("failed to apply for tesing the connection later!"); + Ok(()) + } + + // for sync task to connect to its chain, if chain's total difficulties is larger than the main + // switch by: + // 1, update the startup info + // 2, broadcast the new header + pub fn switch_new_main( + &mut self, + new_head_block: HashValue, + ctx: &mut ServiceContext>, + ) -> Result<()> + where + TransactionPoolServiceT: TxPoolSyncService, + { + let new_branch = BlockChain::new( + self.config.net().time_service(), + new_head_block, + self.storage.clone(), + self.vm_metrics.clone(), + self.main.dag(), + )?; + + let main_total_difficulty = self.main.get_total_difficulty()?; + let branch_total_difficulty = new_branch.get_total_difficulty()?; + if branch_total_difficulty > main_total_difficulty { + self.main = new_branch; + self.update_startup_info(self.main.head_block().header())?; + ctx.broadcast(NewHeadBlock { + executed_block: Arc::new(self.main.head_block()), + // tips: self.main.status().tips_hash.clone(), + }); + Ok(()) + } else { + Ok(()) + } + } + pub fn select_head(&mut self, new_branch: BlockChain) -> Result<()> { let executed_block = new_branch.head_block(); let main_total_difficulty = self.main.get_total_difficulty()?; @@ -247,6 +341,7 @@ where block_id, self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?; // delete block since from block.number + 1 to latest. @@ -279,11 +374,12 @@ where ///Directly execute the block and save result, do not try to connect. 
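// Note: the chain is rebuilt at block.header().parent_hash(), so verification and
// execution run against the parent state; selecting the new head remains the job
// of try_connect()/select_head().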
pub fn execute(&mut self, block: Block) -> Result { - let chain = BlockChain::new( + let mut chain = BlockChain::new( self.config.net().time_service(), block.header().parent_hash(), self.storage.clone(), self.vm_metrics.clone(), + self.dag.clone(), )?; let verify_block = chain.verify(block)?; chain.execute(verify_block) @@ -381,7 +477,10 @@ where .inc() } - if let Err(e) = self.bus.broadcast(NewHeadBlock(Arc::new(block))) { + if let Err(e) = self.bus.broadcast(NewHeadBlock { + executed_block: Arc::new(block), + // tips: self.main.status().tips_hash.clone(), + }) { error!("Broadcast NewHeadBlock error: {:?}", e); } } diff --git a/sync/src/sync.rs b/sync/src/sync.rs index dd4bb57f3c..a89f32e793 100644 --- a/sync/src/sync.rs +++ b/sync/src/sync.rs @@ -11,8 +11,10 @@ use futures_timer::Delay; use network_api::peer_score::PeerScoreMetrics; use network_api::{PeerId, PeerProvider, PeerSelector, PeerStrategy, ReputationChange}; use starcoin_chain::BlockChain; -use starcoin_chain_api::ChainReader; +use starcoin_chain_api::{ChainAsyncService, ChainReader}; +use starcoin_chain_service::ChainReaderService; use starcoin_config::NodeConfig; +use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_network::NetworkServiceRef; @@ -26,10 +28,12 @@ use starcoin_sync_api::{ PeerScoreRequest, PeerScoreResponse, SyncCancelRequest, SyncProgressReport, SyncProgressRequest, SyncServiceHandler, SyncStartRequest, SyncStatusRequest, SyncTarget, }; +use starcoin_txpool::TxPoolService; use starcoin_types::block::BlockIdAndNumber; use starcoin_types::startup_info::ChainStatus; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::{NewHeadBlock, SyncStatusChangeEvent, SystemStarted}; +use std::result::Result::Ok; use std::sync::Arc; use std::time::Duration; use stream_task::{TaskError, TaskEventCounterHandle, TaskHandle}; @@ -99,6 +103,73 @@ impl SyncService { }) } + pub async fn create_verified_client( + network: NetworkServiceRef, + config: Arc, + peer_strategy: Option, + peers: Vec, + peer_score_metrics: Option, + ) -> Result> { + let peer_select_strategy = + peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); + + let mut peer_set = network.peer_set().await?; + + loop { + if peer_set.is_empty() || peer_set.len() < (config.net().min_peers() as usize) { + let level = if config.net().is_dev() || config.net().is_test() { + Level::Debug + } else { + Level::Info + }; + log!( + level, + "[sync]Waiting enough peers to sync, current: {:?} peers, min peers: {:?}", + peer_set.len(), + config.net().min_peers() + ); + + Delay::new(Duration::from_secs(1)).await; + peer_set = network.peer_set().await?; + } else { + break; + } + } + + let peer_reputations = network + .reputations(REPUTATION_THRESHOLD) + .await? + .await? 
+ .into_iter() + .map(|(peer, reputation)| { + ( + peer, + (REPUTATION_THRESHOLD.abs().saturating_add(reputation)) as u64, + ) + }) + .collect(); + + let peer_selector = PeerSelector::new_with_reputation( + peer_reputations, + peer_set, + peer_select_strategy, + peer_score_metrics, + ); + + peer_selector.retain_rpc_peers(); + if !peers.is_empty() { + peer_selector.retain(peers.as_ref()) + } + if peer_selector.is_empty() { + return Err(format_err!("[sync] No peers to sync.")); + } + + Ok(Arc::new(VerifiedRpcClient::new( + peer_selector, + network.clone(), + ))) + } + pub fn check_and_start_sync( &mut self, peers: Vec, @@ -144,66 +215,17 @@ impl SyncService { let network = ctx.get_shared::()?; let storage = self.storage.clone(); let self_ref = ctx.self_ref(); - let connector_service = ctx.service_ref::()?.clone(); + let connector_service = ctx + .service_ref::>()? + .clone(); + let chain_service = ctx.service_ref::()?.clone(); let config = self.config.clone(); let peer_score_metrics = self.peer_score_metrics.clone(); let sync_metrics = self.metrics.clone(); let vm_metrics = self.vm_metrics.clone(); + let dag = ctx.get_shared::()?; let fut = async move { - let peer_select_strategy = - peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); - - let mut peer_set = network.peer_set().await?; - - loop { - if peer_set.is_empty() || peer_set.len() < (config.net().min_peers() as usize) { - let level = if config.net().is_dev() || config.net().is_test() { - Level::Debug - } else { - Level::Info - }; - log!( - level, - "[sync]Waiting enough peers to sync, current: {:?} peers, min peers: {:?}", - peer_set.len(), - config.net().min_peers() - ); - - Delay::new(Duration::from_secs(1)).await; - peer_set = network.peer_set().await?; - } else { - break; - } - } - - let peer_reputations = network - .reputations(REPUTATION_THRESHOLD) - .await? - .await? - .into_iter() - .map(|(peer, reputation)| { - ( - peer, - (REPUTATION_THRESHOLD.abs().saturating_add(reputation)) as u64, - ) - }) - .collect(); - - let peer_selector = PeerSelector::new_with_reputation( - peer_reputations, - peer_set, - peer_select_strategy, - peer_score_metrics, - ); - - peer_selector.retain_rpc_peers(); - if !peers.is_empty() { - peer_selector.retain(peers.as_ref()) - } - if peer_selector.is_empty() { - return Err(format_err!("[sync] No peers to sync.")); - } - + let dag_fork_number = chain_service.dag_fork_number().await?; let startup_info = storage .get_startup_info()? .ok_or_else(|| format_err!("Startup info should exist."))?; @@ -213,10 +235,14 @@ impl SyncService { format_err!("Can not find block info by id: {}", current_block_id) })?; - let rpc_client = Arc::new(VerifiedRpcClient::new( - peer_selector.clone(), + let rpc_client = Self::create_verified_client( network.clone(), - )); + config.clone(), + peer_strategy, + peers, + peer_score_metrics, + ) + .await?; if let Some(target) = rpc_client.get_best_target(current_block_info.get_total_difficulty())? 
{ @@ -235,20 +261,22 @@ impl SyncService { config.sync.max_retry_times(), sync_metrics.clone(), vm_metrics.clone(), + dag, + dag_fork_number, )?; self_ref.notify(SyncBeginEvent { target, task_handle, task_event_handle, - peer_selector, + peer_selector: rpc_client.selector().clone(), })?; if let Some(sync_task_total) = sync_task_total.as_ref() { sync_task_total.with_label_values(&["start"]).inc(); } Ok(Some(fut.await?)) } else { - debug!("[sync]No best peer to request, current is beast."); + info!("[sync]No best peer to request, current is best."); Ok(None) } }; @@ -574,10 +602,9 @@ impl EventHandler for SyncService { impl EventHandler for SyncService { fn handle_event(&mut self, msg: NewHeadBlock, ctx: &mut ServiceContext) { - let NewHeadBlock(block) = msg; if self.sync_status.update_chain_status(ChainStatus::new( - block.header().clone(), - block.block_info.clone(), + msg.executed_block.header().clone(), + msg.executed_block.block_info.clone(), )) { ctx.broadcast(SyncStatusChangeEvent(self.sync_status.clone())); } diff --git a/sync/src/tasks/accumulator_sync_task.rs b/sync/src/tasks/accumulator_sync_task.rs index 9ed0fb008f..3899c1b2fb 100644 --- a/sync/src/tasks/accumulator_sync_task.rs +++ b/sync/src/tasks/accumulator_sync_task.rs @@ -91,6 +91,7 @@ pub struct AccumulatorCollector { accumulator: MerkleAccumulator, ancestor: BlockIdAndNumber, target: AccumulatorInfo, + dag_fork_heigh: BlockNumber, } impl AccumulatorCollector { @@ -99,12 +100,15 @@ impl AccumulatorCollector { ancestor: BlockIdAndNumber, start: AccumulatorInfo, target: AccumulatorInfo, + dag_fork_heigh: BlockNumber, ) -> Self { + info!("now start to collect the hash value for building the accumulator ahead, ancestor: {:?}", ancestor); let accumulator = MerkleAccumulator::new_with_info(start, store); Self { accumulator, ancestor, target, + dag_fork_heigh, } } } @@ -124,12 +128,15 @@ impl TaskResultCollector for AccumulatorCollector { fn finish(self) -> Result { let info = self.accumulator.get_info(); - ensure!( - info == self.target, - "Target accumulator: {:?}, but got: {:?}", - self.target, - info - ); + let block_number = info.num_leaves.saturating_sub(1); + if block_number < self.dag_fork_heigh { + ensure!( + info == self.target, + "Target accumulator: {:?}, but got: {:?}", + self.target, + info + ); + } Ok((self.ancestor, self.accumulator)) } } diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 57f6703a9d..cc88980475 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -3,23 +3,29 @@ use crate::tasks::{BlockConnectedEvent, BlockConnectedEventHandle, BlockFetcher, BlockLocalStore}; use crate::verified_rpc_client::RpcVerifyError; -use anyhow::{format_err, Result}; +use anyhow::{anyhow, bail, format_err, Result}; use futures::future::BoxFuture; use futures::FutureExt; use network_api::PeerId; use network_api::PeerProvider; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; +use starcoin_chain::verifier::DagBasicVerifier; use starcoin_chain::{verifier::BasicVerifier, BlockChain}; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, ExecutedBlock}; use starcoin_config::G_CRATE_VERSION; +use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; -use starcoin_storage::BARNARD_HARD_FORK_HASH; +use starcoin_network_rpc_api::MAX_BLOCK_HEADER_REQUEST_SIZE; +use starcoin_storage::{Store, BARNARD_HARD_FORK_HASH}; use starcoin_sync_api::SyncTarget; -use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; 
+use starcoin_types::block::{Block, BlockHeader, BlockIdAndNumber, BlockInfo, BlockNumber}; use std::collections::HashMap; use std::sync::Arc; +use std::time::Duration; use stream_task::{CollectorState, TaskError, TaskResultCollector, TaskState}; +use super::{BlockConnectAction, BlockConnectedFinishEvent}; + #[derive(Clone, Debug)] pub struct SyncBlockData { pub(crate) block: Block, @@ -187,6 +193,8 @@ pub struct BlockCollector { event_handle: H, peer_provider: N, skip_pow_verify: bool, + local_store: Arc, + fetcher: Arc, } impl BlockCollector @@ -201,6 +209,8 @@ where event_handle: H, peer_provider: N, skip_pow_verify: bool, + local_store: Arc, + fetcher: Arc, ) -> Self { Self { current_block_info, @@ -209,6 +219,8 @@ where event_handle, peer_provider, skip_pow_verify, + local_store, + fetcher, } } @@ -217,6 +229,69 @@ where self.apply_block(block, None) } + fn notify_connected_block( + &mut self, + block: Block, + block_info: BlockInfo, + action: BlockConnectAction, + state: CollectorState, + ) -> Result { + let total_difficulty = block_info.get_total_difficulty(); + + // if the new block's total difficulty is smaller than the current, + // do nothing because we do not need to update the current chain in any other services. + if total_difficulty <= self.current_block_info.total_difficulty { + return Ok(state); // nothing to do + } + + // only try connect block when sync chain total_difficulty > node's current chain. + + // first, create the sender and receiver for ensuring that + // the last block is connected before the next synchronization is triggered. + // if the block is not the last one, we do not want to do this. + let (sender, mut receiver) = match state { + CollectorState::Enough => { + let (s, r) = futures::channel::mpsc::unbounded::(); + (Some(s), Some(r)) + } + CollectorState::Need => (None, None), + }; + + // second, construct the block connect event. + let block_connect_event = BlockConnectedEvent { + block, + feedback: sender, + action, + }; + + // third, broadcast it. + if let Err(e) = self.event_handle.handle(block_connect_event.clone()) { + error!( + "Send BlockConnectedEvent error: {:?}, block_id: {}", + e, + block_info.block_id() + ); + } + + // finally, if it is the last one, wait for the last block to be processed. + if block_connect_event.feedback.is_some() && receiver.is_some() { + let mut count: i32 = 0; + while count < 3 { + count = count.saturating_add(1); + match receiver.as_mut().unwrap().try_next() { + Ok(_) => { + break; + } + Err(_) => { + info!("Waiting for last block to be processed"); + async_std::task::block_on(async_std::task::sleep(Duration::from_secs(10))); + } + } + } + } + Ok(state) + } + fn apply_block(&mut self, block: Block, peer_id: Option) -> Result<()> { if let Some((_failed_block, pre_peer_id, err, version)) = self .chain @@ -282,48 +357,319 @@ where Ok(()) } } -} -impl TaskResultCollector for BlockCollector -where - N: PeerProvider + 'static, - H: BlockConnectedEventHandle + 'static, -{ - type Output = BlockChain; + fn find_absent_parent_dag_blocks( + &self, + block_header: BlockHeader, + // ancestors: &mut Vec, + absent_blocks: &mut Vec, + ) -> Result<()> { + let parents = block_header.parents_hash().unwrap_or_default(); + if parents.is_empty() { + return Ok(()); + } + for parent in parents { + if !self.chain.has_dag_block(parent)? 
{ + if absent_blocks.contains(&parent) { + continue; + } + absent_blocks.push(parent) + } + // if ancestors.contains(&parent) { + // continue; + // } + // ancestors.push(parent); + } + Ok(()) + } - fn collect(&mut self, item: SyncBlockData) -> Result { - let (block, block_info, peer_id) = item.into(); - let block_id = block.id(); - let timestamp = block.header().timestamp(); - let block_info = match block_info { - Some(block_info) => { - //If block_info exists, it means that this block was already executed and try connect in the previous sync, but the sync task was interrupted. - //So, we just need to update chain and continue - self.chain.connect(ExecutedBlock { - block, - block_info: block_info.clone(), - })?; - block_info + fn find_absent_parent_dag_blocks_for_blocks( + &self, + block_headers: Vec, + // ancestors: &mut Vec, + absent_blocks: &mut Vec, + ) -> Result<()> { + for block_header in block_headers { + self.find_absent_parent_dag_blocks(block_header, absent_blocks)?; + } + Ok(()) + } + + async fn find_absent_ancestor( + &self, + mut block_headers: Vec, + ) -> Result> { + // let mut ancestors = vec![]; + let mut absent_block_headers = vec![]; + loop { + let mut absent_blocks = vec![]; + self.find_absent_parent_dag_blocks_for_blocks( + block_headers, + // &mut ancestors, + &mut absent_blocks, + )?; + if absent_blocks.is_empty() { + return Ok(absent_block_headers); } - None => { - self.apply_block(block.clone(), peer_id)?; - self.chain.time_service().adjust(timestamp); - let block_info = self.chain.status().info; - let total_difficulty = block_info.get_total_difficulty(); - // only try connect block when sync chain total_difficulty > node's current chain. - if total_difficulty > self.current_block_info.total_difficulty { - if let Err(e) = self.event_handle.handle(BlockConnectedEvent { block }) { - error!( - "Send BlockConnectedEvent error: {:?}, block_id: {}", - e, block_id - ); + let remote_absent_block_headers = self.fetch_block_headers(absent_blocks).await?; + if remote_absent_block_headers.iter().any(|(id, header)| { + if header.is_none() { + error!( + "fetch absent block header failed, block id: {:?}, it should not be absent!", + id + ); + return true; + } + false + }) { + bail!("fetch absent block header failed, it should not be absent!"); + } + block_headers = remote_absent_block_headers + .iter() + .map(|(_, header)| header.clone().expect("block header should not be none!").clone()) + .collect(); + absent_block_headers.append(&mut remote_absent_block_headers.into_iter().map(|(_, header)| header.expect("block header should not be none!")).collect()); + } + } + + pub fn ensure_dag_parent_blocks_exist( + &mut self, + block_header: BlockHeader, + ) -> Result<()> { + if !self.chain.is_dag(&block_header)? { + info!( + "the block is not a dag block, skipping, its id: {:?}, its number {:?}", + block_header.id(), + block_header.number() + ); + return Ok(()); + } + if self.chain.has_dag_block(block_header.id())? 
{ + info!( + "the dag block exists, skipping, its id: {:?}, its number {:?}", + block_header.id(), + block_header.number() + ); + return Ok(()); + } + info!( + "the block is a dag block, its id: {:?}, number: {:?}, its parents: {:?}", + block_header.id(), + block_header.number(), + block_header.parents_hash() + ); + let fut = async { + let mut absent_ancestor = + self + .find_absent_ancestor(vec![block_header.clone()]) + .await?; + + if absent_ancestor.is_empty() { + return Ok(()); + } + + absent_ancestor.sort_by(|a, b| a.number().cmp(&b.number())); + info!("now apply absent ancestors: {:?}", absent_ancestor); + + let mut process_dag_ancestors = HashMap::new(); + loop { + for ancestor_block_header in absent_ancestor.iter() { + if self.chain.has_dag_block(ancestor_block_header.id())? { + info!("{:?} was already applied", ancestor_block_header.id()); + process_dag_ancestors.insert(ancestor_block_header.id(), ancestor_block_header.clone()); + } else { + for (block, _peer_id) in self + .fetcher + .fetch_blocks(vec![ancestor_block_header.id()]) + .await? + { + if self.chain.has_dag_block(ancestor_block_header.id())? { + info!("{:?} was already applied", ancestor_block_header.id()); + process_dag_ancestors.insert(ancestor_block_header.id(), ancestor_block_header.clone()); + continue; + } + + if block.id() != ancestor_block_header.id() { + bail!( + "fetch block failed, expect block id: {:?}, but got block id: {:?}", + ancestor_block_header.id(), + block.id() + ); + } + + info!( + "now apply for sync after fetching a dag block: {:?}, number: {:?}", + block.id(), + block.header().number() + ); + + if !self.check_parents_exist(block.header())? { + info!( + "block: {:?}, number: {:?}, its parent still dose not exist, waiting for next round", + ancestor_block_header.id(), + ancestor_block_header.number() + ); + continue; + } + // let executed_block = if self.skip_pow_verify { + let executed_block = self + .chain + .apply_with_verifier::(block.clone())?; + // } else { + // self.chain.apply(block.clone())? + // }; + // let executed_block = self.chain.apply(block)?; + info!( + "succeed to apply a dag block: {:?}, number: {:?}", + executed_block.block.id(), + executed_block.block.header().number() + ); + process_dag_ancestors.insert(ancestor_block_header.id(), ancestor_block_header.clone()); + self.notify_connected_block( + executed_block.block, + executed_block.block_info.clone(), + BlockConnectAction::ConnectNewBlock, + self.check_enough_by_info(executed_block.block_info)?, + )?; + } } } - block_info + + if process_dag_ancestors.is_empty() { + bail!("no absent ancestor block is executed!, absent ancestor block: {:?}, their child block id: {:?}, number: {:?}", absent_ancestor, block_header.id(), block_header.number()); + } else { + absent_ancestor.retain(|header| !process_dag_ancestors.contains_key(&header.id())); + } + + if absent_ancestor.is_empty() { + break; + } } + + // dag_ancestors = std::mem::take(&mut process_dag_ancestors); + // // process_dag_ancestors = vec![]; + + // dag_ancestors = Self::remove_repeated( + // &self.fetch_dag_block_absent_children(dag_ancestors).await?, + // ); + // source_path.extend(&dag_ancestors); + + // if !dag_ancestors.is_empty() { + // for (id, op_header) in self.fetch_block_headers(dag_ancestors.clone()).await? 
{ + // if let Some(header) = op_header { + // self.ensure_dag_parent_blocks_exist(header, source_path)?; + // } else { + // bail!("when finding the ancestor's children's parents, fetching block header failed, block id: {:?}", id); + // } + // } + // } + + // info!("next dag children blocks: {:?}", dag_ancestors); + + Ok(()) }; + async_std::task::block_on(fut) + } - - //verify target + async fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> Result)>> { + let mut result = vec![]; + for chunk in block_ids.chunks(usize::try_from(MAX_BLOCK_HEADER_REQUEST_SIZE)?) { + result.extend(self.fetcher.fetch_block_headers(chunk.to_vec()).await?); + } + Ok(result) + } + + fn check_parents_exist(&self, block_header: &BlockHeader) -> Result { + for parent in block_header.parents_hash().ok_or_else(|| { + anyhow!( + "the dag block's parents should exist, block id: {:?}, number: {:?}", + block_header.id(), + block_header.number() + ) + })? { + if !self.chain.has_dag_block(parent)? { + info!("block: {:?}, number: {:?}, its parent({:?}) still does not exist, waiting for next round", block_header.id(), block_header.number(), parent); + return Ok(false); + } + } + Ok(true) + } + + // fn remove_repeated(repeated: &[HashValue]) -> Vec { + // let mut uniqued = vec![]; + // let mut remove_repeated = HashSet::new(); + // for d in repeated { + // if remove_repeated.insert(*d) { + // uniqued.push(*d); + // } + // } + // uniqued + // } + + // async fn fetch_dag_block_absent_children( + // &self, + // mut dag_ancestors: Vec, + // ) -> Result> { + // let mut absent_children = Vec::new(); + // while !dag_ancestors.is_empty() { + // let children = self + // .fetch_dag_block_children(std::mem::take(&mut dag_ancestors)) + // .await?; + // for child in children { + // if self.chain.has_dag_block(child)? { + // if !dag_ancestors.contains(&child) { + // dag_ancestors.push(child); + // } + // } else if !absent_children.contains(&child) { + // absent_children.push(child); + // } + // } + // } + // Ok(absent_children) + // } + + // async fn fetch_dag_block_children( + // &self, + // dag_ancestors: Vec, + // ) -> Result> { + // let mut result = vec![]; + // for chunk in dag_ancestors.chunks(usize::try_from(MAX_BLOCK_REQUEST_SIZE)?) 
{ + // result.extend(self.fetch_dag_block_children_inner(chunk.to_vec()).await?); + // } + // Ok(result) + // } + + // async fn fetch_dag_block_children_inner( + // &self, + // dag_ancestors: Vec, + // ) -> Result> { + // let mut count: i32 = 20; + // while count > 0 { + // info!("fetch block children retry count = {}", count); + // match self + // .fetcher + // .fetch_dag_block_children(dag_ancestors.clone()) + // .await + // { + // Ok(result) => { + // return Ok(result); + // } + // Err(e) => { + // count = count.saturating_sub(1); + // if count == 0 { + // bail!("failed to fetch dag block children due to: {:?}", e); + // } + // async_std::task::sleep(Duration::from_secs(1)).await; + // } + // } + // } + // bail!("failed to fetch dag block children"); + // } + + pub fn check_enough_by_info(&self, block_info: BlockInfo) -> Result { if block_info.block_accumulator_info.num_leaves == self.target.block_info.block_accumulator_info.num_leaves { @@ -332,10 +678,10 @@ where RpcVerifyError::new_with_peers( self.target.peers.clone(), format!( - "Verify target error, expect target: {:?}, collect target block_info:{:?}", - self.target.block_info, - block_info - ), + "Verify target error, expect target: {:?}, collect target block_info:{:?}", + self.target.block_info, + block_info + ), ) .into(), ) @@ -348,6 +694,90 @@ where } } + pub fn check_enough(&self) -> Result { + if let Some(block_info) = self + .local_store + .get_block_info(self.chain.current_header().id())? + { + self.check_enough_by_info(block_info) + } else { + Ok(CollectorState::Need) + } + } +} + +impl TaskResultCollector for BlockCollector +where + N: PeerProvider + 'static, + H: BlockConnectedEventHandle + 'static, +{ + type Output = BlockChain; + + fn collect(&mut self, item: SyncBlockData) -> Result { + let (block, block_info, peer_id) = item.into(); + + // if this is a dag block, we must ensure that its dag parent blocks exist locally; + // if any are absent, we pull them from remote peers first. + info!("now sync dag block -- ensure_dag_parent_blocks_exist"); + self.ensure_dag_parent_blocks_exist(block.header().clone())?; + let state = self.check_enough(); + if let anyhow::Result::Ok(CollectorState::Enough) = &state { + let current_header = self.chain.current_header(); + let current_block = self + .local_store + .get_block(current_header.id())? + .expect("failed to get the current block which should exist"); + return self.notify_connected_block( + current_block, + self.local_store + .get_block_info(current_header.id())? + .expect("block info should exist"), + BlockConnectAction::ConnectExecutedBlock, + state?, + ); + } + info!("successfully ensured that the block's parents exist"); + + let timestamp = block.header().timestamp(); + + let block_info = if self.chain.is_dag(block.header())? { + if self.chain.has_dag_block(block.header().id())? { + block_info + } else { + None + } + } else { + block_info + }; + + let (block_info, action) = match block_info { + Some(block_info) => { + //If block_info exists, it means that this block was already executed and a connect was attempted in the previous sync, but the sync task was interrupted. 
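+ //(Connecting the stored ExecutedBlock below reuses the saved block_info, so the block is not re-executed here.)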
+ //So, we need to make sure the dag genesis is initialized properly, then update the chain and continue + self.chain.init_dag_with_genesis(block.header().clone())?; + self.chain.connect(ExecutedBlock { + block: block.clone(), + block_info: block_info.clone(), + })?; + (block_info, BlockConnectAction::ConnectExecutedBlock) + } + None => { + self.apply_block(block.clone(), peer_id)?; + self.chain.time_service().adjust(timestamp); + ( + self.chain.status().info, + BlockConnectAction::ConnectNewBlock, + ) + } + }; + + //verify target + let state: Result = + self.check_enough_by_info(block_info.clone()); + + self.notify_connected_block(block, block_info, action, state?) + } + fn finish(self) -> Result { Ok(self.chain) } diff --git a/sync/src/tasks/inner_sync_task.rs b/sync/src/tasks/inner_sync_task.rs index 7552656417..f5ed4afef7 100644 --- a/sync/src/tasks/inner_sync_task.rs +++ b/sync/src/tasks/inner_sync_task.rs @@ -1,22 +1,24 @@ -use crate::tasks::{ - AccumulatorCollector, BlockAccumulatorSyncTask, BlockCollector, BlockConnectedEventHandle, - BlockFetcher, BlockIdFetcher, BlockSyncTask, PeerOperator, -}; use anyhow::format_err; use network_api::PeerProvider; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_chain::BlockChain; +use starcoin_dag::blockdag::BlockDAG; use starcoin_executor::VMMetrics; use starcoin_storage::Store; use starcoin_sync_api::SyncTarget; use starcoin_time_service::TimeService; -use starcoin_types::block::{BlockIdAndNumber, BlockInfo}; +use starcoin_types::block::{BlockIdAndNumber, BlockInfo, BlockNumber}; use std::cmp::min; use std::sync::Arc; use stream_task::{ CustomErrorHandle, Generator, TaskError, TaskEventHandle, TaskGenerator, TaskHandle, TaskState, }; +use super::{ + AccumulatorCollector, BlockAccumulatorSyncTask, BlockCollector, BlockConnectedEventHandle, + BlockFetcher, BlockIdFetcher, BlockSyncTask, PeerOperator, +}; + pub struct InnerSyncTask where H: BlockConnectedEventHandle + Sync + 'static, @@ -32,6 +34,8 @@ where time_service: Arc, peer_provider: N, custom_error_handle: Arc, + dag: BlockDAG, + dag_fork_height: BlockNumber, } impl InnerSyncTask where @@ -50,6 +54,8 @@ where time_service: Arc, peer_provider: N, custom_error_handle: Arc, + dag_fork_height: BlockNumber, + dag: BlockDAG, ) -> Self { Self { ancestor, @@ -61,6 +67,8 @@ where time_service, peer_provider, custom_error_handle, + dag, + dag_fork_height, } } @@ -111,13 +119,14 @@ where self.ancestor, ancestor_block_info.clone().block_accumulator_info, self.target.block_info.block_accumulator_info.clone(), + self.dag_fork_height, ), self.event_handle.clone(), self.custom_error_handle.clone(), ) .and_then(move |(ancestor, accumulator), event_handle| { let check_local_store = - ancestor_block_info.total_difficulty < current_block_info.total_difficulty; + ancestor_block_info.total_difficulty <= current_block_info.total_difficulty; let block_sync_task = BlockSyncTask::new( accumulator, @@ -132,6 +141,7 @@ where ancestor.id, self.storage.clone(), vm_metrics, + self.dag.clone(), )?; let block_collector = BlockCollector::new_with_handle( current_block_info.clone(), @@ -140,6 +150,8 @@ where self.block_event_handle.clone(), self.peer_provider.clone(), skip_pow_verify_when_sync, + self.storage.clone(), + self.fetcher.clone(), ); Ok(TaskGenerator::new( block_sync_task, diff --git a/sync/src/tasks/mock.rs b/sync/src/tasks/mock.rs index 5f5c66034d..7ff6e5c458 100644 --- a/sync/src/tasks/mock.rs +++ b/sync/src/tasks/mock.rs @@ -4,7 +4,7 @@ use crate::tasks::{ BlockConnectedEvent, BlockFetcher, BlockIdFetcher, 
BlockInfoFetcher, PeerOperator, SyncFetcher, }; -use anyhow::{format_err, Context, Result}; +use anyhow::{format_err, Context, Ok, Result}; use async_std::task::JoinHandle; use futures::channel::mpsc::UnboundedReceiver; use futures::future::BoxFuture; @@ -14,15 +14,19 @@ use network_api::messages::NotificationMessage; use network_api::{PeerId, PeerInfo, PeerSelector, PeerStrategy}; use network_p2p_core::{NetRpcError, RpcErrorCode}; use rand::Rng; +use starcoin_account_api::AccountInfo; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; use starcoin_chain_mock::MockChain; use starcoin_config::ChainNetwork; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_network_rpc_api::G_RPC_INFO; +use starcoin_storage::Storage; use starcoin_sync_api::SyncTarget; -use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_types::block::{Block, BlockHeader, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; use std::time::Duration; @@ -162,6 +166,34 @@ impl SyncNodeMocker { )) } + pub fn new_with_storage( + net: ChainNetwork, + storage: Arc, + chain_info: ChainInfo, + miner: AccountInfo, + delay_milliseconds: u64, + random_error_percent: u32, + dag: BlockDAG, + ) -> Result { + let chain = MockChain::new_with_storage(net, storage, chain_info.head().id(), miner, dag)?; + let peer_id = PeerId::random(); + let peer_info = PeerInfo::new( + peer_id.clone(), + chain.chain_info(), + NotificationMessage::protocols(), + G_RPC_INFO.clone().into_protocols(), + None, + ); + let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None); + Ok(Self::new_inner( + peer_id, + chain, + ErrorStrategy::Timeout(delay_milliseconds), + random_error_percent, + peer_selector, + )) + } + pub fn new_with_strategy( net: ChainNetwork, error_strategy: ErrorStrategy, @@ -250,10 +282,31 @@ impl SyncNodeMocker { self.chain_mocker.head() } + pub fn get_storage(&self) -> Arc { + self.chain_mocker.get_storage() + } + pub fn produce_block(&mut self, times: u64) -> Result<()> { self.chain_mocker.produce_and_apply_times(times) } + pub fn produce_block_by_header( + &mut self, + parent_header: BlockHeader, + ) -> Result { + let next_block = self.chain_mocker.produce_block_by_header(parent_header)?; + Ok(self + .chain_mocker + .get_storage() + .get_block_by_hash(next_block.id())? + .expect("failed to get block by hash")) + } + + // pub fn produce_block_and_create_dag(&mut self, times: u64) -> Result<()> { + // self.chain_mocker.produce_and_apply_times(times)?; + // Ok(()) + // } + pub fn select_head(&mut self, block: Block) -> Result<()> { self.chain_mocker.select_head(block) } @@ -313,7 +366,7 @@ impl BlockFetcher for SyncNodeMocker { .into_iter() .map(|block_id| { if let Some(block) = self.chain().get_block(block_id)? 
{ - Ok((block, None)) + Ok((block, Some(PeerId::random()))) } else { Err(format_err!("Can not find block by id: {}", block_id)) } @@ -326,6 +379,35 @@ impl BlockFetcher for SyncNodeMocker { } .boxed() } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + async move { + let blocks = self.fetch_blocks(block_ids).await?; + blocks + .into_iter() + .map(|(block, _)| Ok((block.id(), Some(block.header().clone())))) + .collect() + } + .boxed() + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + async move { + let blocks = self.fetch_blocks(block_ids).await?; + let mut result = vec![]; + for block in blocks { + result.extend(self.chain().dag().get_children(block.0.id())?); + } + Ok(result) + } + .boxed() + } } impl BlockInfoFetcher for SyncNodeMocker { @@ -339,8 +421,8 @@ impl BlockInfoFetcher for SyncNodeMocker { result.push(self.chain().get_block_info(Some(hash)).unwrap()); }); async move { - let _ = self.select_a_peer()?; - self.err_mocker.random_err().await?; + // let _ = self.select_a_peer()?; + // self.err_mocker.random_err().await?; Ok(result) } .boxed() diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index 1ed2424924..0c7dc0bad9 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::block_connector::BlockConnectorService; use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::inner_sync_task::InnerSyncTask; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; @@ -14,12 +15,16 @@ use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::MerkleAccumulator; use starcoin_chain::{BlockChain, ChainReader}; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_logger::prelude::*; use starcoin_service_registry::{ActorService, EventHandler, ServiceRef}; use starcoin_storage::Store; use starcoin_sync_api::SyncTarget; use starcoin_time_service::TimeService; -use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_txpool::TxPoolService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::{Block, BlockHeader, BlockIdAndNumber, BlockInfo, BlockNumber}; use starcoin_types::startup_info::ChainStatus; use starcoin_types::U256; use std::str::FromStr; @@ -280,6 +285,16 @@ pub trait BlockFetcher: Send + Sync { &self, block_ids: Vec, ) -> BoxFuture)>>>; + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>>; + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>>; } impl BlockFetcher for Arc @@ -292,6 +307,20 @@ where ) -> BoxFuture<'_, Result)>>> { BlockFetcher::fetch_blocks(self.as_ref(), block_ids) } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + BlockFetcher::fetch_block_headers(self.as_ref(), block_ids) + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + BlockFetcher::fetch_dag_block_children(self.as_ref(), block_ids) + } } impl BlockFetcher for VerifiedRpcClient { @@ -301,7 +330,7 @@ impl BlockFetcher for VerifiedRpcClient { ) -> BoxFuture<'_, Result)>>> { self.get_blocks(block_ids.clone()) .and_then(|blocks| async move { - let results: Result)>> = block_ids + let results = block_ids .iter() .zip(blocks) .map(|(id, block)| { @@ -309,11 +338,29 @@ impl BlockFetcher for VerifiedRpcClient { format_err!("Get block by id: {} failed, remote node 
return None", id) }) }) - .collect(); + .collect::>>(); results.map_err(fetcher_err_map) }) .boxed() } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + self.get_block_headers_by_hash(block_ids) + .map_err(fetcher_err_map) + .boxed() + } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + self.get_dag_block_children(block_ids) + .map_err(fetcher_err_map) + .boxed() + } } pub trait BlockInfoFetcher: Send + Sync { @@ -372,6 +419,7 @@ impl BlockLocalStore for Arc { Some(block) => { let id = block.id(); let block_info = self.get_block_info(id)?; + Ok(Some(SyncBlockData::new(block, block_info, None))) } None => Ok(None), @@ -380,11 +428,22 @@ impl BlockLocalStore for Arc { } } +#[derive(Clone, Debug)] +pub enum BlockConnectAction { + ConnectNewBlock, + ConnectExecutedBlock, +} + #[derive(Clone, Debug)] pub struct BlockConnectedEvent { pub block: Block, + pub feedback: Option>, + pub action: BlockConnectAction, } +#[derive(Clone, Debug)] +pub struct BlockConnectedFinishEvent; + #[derive(Clone, Debug)] pub struct BlockDiskCheckEvent {} @@ -392,10 +451,15 @@ pub trait BlockConnectedEventHandle: Send + Clone + std::marker::Unpin { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()>; } -impl BlockConnectedEventHandle for ServiceRef -where - S: ActorService + EventHandler, -{ +impl BlockConnectedEventHandle for ServiceRef> { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.notify(event)?; + Ok(()) + } +} + +#[cfg(test)] +impl BlockConnectedEventHandle for ServiceRef> { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { self.notify(event)?; Ok(()) @@ -459,6 +523,24 @@ impl BlockConnectedEventHandle for UnboundedSender { } } +#[derive(Debug, Clone)] +pub struct BlockConnectEventHandleMock { + sender: UnboundedSender, +} + +impl BlockConnectEventHandleMock { + pub fn new(sender: UnboundedSender) -> Result { + Ok(Self { sender }) + } +} + +impl BlockConnectedEventHandle for BlockConnectEventHandleMock { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.sender.start_send(event)?; + Ok(()) + } +} + pub struct ExtSyncTaskErrorHandle where F: SyncFetcher + 'static, @@ -509,7 +591,11 @@ mod inner_sync_task; #[cfg(test)] pub(crate) mod mock; #[cfg(test)] -mod tests; +mod test_tools; +#[cfg(test)] +pub mod tests; +#[cfg(test)] +mod tests_dag; use crate::sync_metrics::SyncMetrics; pub use accumulator_sync_task::{AccumulatorCollector, BlockAccumulatorSyncTask}; @@ -530,6 +616,8 @@ pub fn full_sync_task( max_retry_times: u64, sync_metrics: Option, vm_metrics: Option, + dag: BlockDAG, + dag_fork_number: BlockNumber, ) -> Result<( BoxFuture<'static, Result>, TaskHandle, @@ -586,6 +674,7 @@ where let all_fut = async move { let ancestor = fut.await?; + info!("got ancestor for sync: {:?}", ancestor); let mut ancestor_block_info = storage .get_block_info(ancestor.id) .map_err(TaskError::BreakError)? @@ -635,6 +724,8 @@ where time_service.clone(), peer_provider.clone(), ext_error_handle.clone(), + dag_fork_number, + dag.clone(), ); let start_now = Instant::now(); let (block_chain, _) = inner @@ -673,11 +764,20 @@ where .sync_peer_count .set(fetcher.peer_selector().len() as u64); } - if target.target_id.number() <= latest_block_chain.status().head.number() { break; } + // chain read the fork number from remote peers, break and start again + if latest_block_chain.dag_fork_height().map_err(TaskError::BreakError)? 
< BlockNumber::MAX && + dag_fork_number != BlockNumber::MAX { + break; + } let chain_status = latest_block_chain.status(); + if latest_block_chain.is_dag(&latest_block_chain.status().head).map_err(TaskError::BreakError)? { + if chain_status.info().get_total_difficulty() >= target.block_info.get_total_difficulty() { + break; + } + } max_peers = max_better_peers( target_block_number, latest_block_chain.current_header().number(), diff --git a/sync/src/tasks/test_tools.rs b/sync/src/tasks/test_tools.rs new file mode 100644 index 0000000000..faa428ef5e --- /dev/null +++ b/sync/src/tasks/test_tools.rs @@ -0,0 +1,220 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +#![allow(clippy::integer_arithmetic)] +use crate::block_connector::BlockConnectorService; +use crate::tasks::full_sync_task; +use crate::tasks::mock::SyncNodeMocker; +use anyhow::Result; +use futures::channel::mpsc::unbounded; +use futures_timer::Delay; +use pin_utils::core_reexport::time::Duration; +use starcoin_account_api::AccountInfo; +use starcoin_chain_api::ChainReader; +use starcoin_chain_service::ChainReaderService; +use starcoin_config::{BuiltinNetworkID, ChainNetwork, NodeConfig, RocksdbConfig}; +use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; +use starcoin_genesis::Genesis; +use starcoin_logger::prelude::*; +use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceRef}; +use starcoin_storage::db_storage::DBStorage; +use starcoin_storage::storage::StorageInstance; +use starcoin_storage::Storage; +// use starcoin_txpool_mock_service::MockTxPoolService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use stest::actix_export::System; +use test_helper::DummyNetworkService; + +#[cfg(test)] +pub struct SyncTestSystem { + pub target_node: SyncNodeMocker, + pub local_node: SyncNodeMocker, + pub registry: ServiceRef, +} + +#[cfg(test)] +impl SyncTestSystem { + pub async fn initialize_sync_system() -> Result { + let config = Arc::new(NodeConfig::random_for_test()); + + // let (storage, chain_info, _, _) = StarcoinGenesis::init_storage_for_test(config.net()) + // .expect("init storage by genesis fail."); + + let temp_path = PathBuf::from(starcoin_config::temp_dir().as_ref()); + let storage_path = temp_path.join(Path::new("local/storage")); + let dag_path = temp_path.join(Path::new("local/dag")); + fs::create_dir_all(storage_path.clone())?; + fs::create_dir_all(dag_path.clone())?; + let storage = Arc::new( + Storage::new(StorageInstance::new_db_instance( + DBStorage::new(storage_path.as_path(), RocksdbConfig::default(), None).unwrap(), + )) + .unwrap(), + ); + let genesis = Genesis::load_or_build(config.net())?; + // init dag + let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + dag_path.as_path(), + FlexiDagStorageConfig::new(), + ) + .expect("init dag storage fail."); + let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); // local dag + + let chain_info = + genesis.execute_genesis_block(config.net(), storage.clone(), dag.clone())?; + + let target_node = SyncNodeMocker::new(config.net().clone(), 300, 0)?; + let local_node = SyncNodeMocker::new_with_storage( + config.net().clone(), + storage.clone(), + chain_info.clone(), + AccountInfo::random(), + 300, + 0, + dag.clone(), + )?; + + let (registry_sender, registry_receiver) = async_std::channel::unbounded(); + + info!( + "in 
initialize_sync_system, start tokio runtime for main thread" ); + + let _handle = timeout_join_handler::spawn(move || { + let system = System::with_tokio_rt(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .on_thread_stop(|| debug!("main thread stopped")) + .thread_name("main") + .build() + .expect("failed to create tokio runtime for main") + }); + async_std::task::block_on(async { + let registry = RegistryService::launch(); + + registry.put_shared(config.clone()).await.unwrap(); + registry.put_shared(storage.clone()).await.unwrap(); + registry + .put_shared(dag) + .await + .expect("failed to put dag in registry"); + registry.put_shared(MockTxPoolService::new()).await.unwrap(); + + Delay::new(Duration::from_secs(2)).await; + + registry.register::().await.unwrap(); + registry + .register::>() + .await + .unwrap(); + + registry_sender.send(registry).await.unwrap(); + }); + + system.run().unwrap(); + }); + + let registry = registry_receiver.recv().await.unwrap(); + + Ok(SyncTestSystem { + target_node, + local_node, + registry, + }) + } +} + +#[cfg(test)] +pub async fn full_sync_new_node() -> Result<()> { + let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let mut node1 = SyncNodeMocker::new(net1, 300, 0)?; + node1.produce_block(10)?; + + let mut arc_node1 = Arc::new(node1); + + let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + + let node2 = SyncNodeMocker::new(net2.clone(), 300, 0)?; + + let target = arc_node1.sync_target(); + + let current_block_header = node2.chain().current_header(); + + let storage = node2.chain().get_storage(); + let dag = node2.chain().dag(); + let (sender_1, receiver_1) = unbounded(); + let (sender_2, _receiver_2) = unbounded(); + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_header.id(), + target.clone(), + false, + net2.time_service(), + storage.clone(), + sender_1, + arc_node1.clone(), + sender_2, + DummyNetworkService::default(), + 15, + None, + None, + dag.clone(), + )?; + let join_handle = node2.process_block_connect_event(receiver_1).await; + let branch = sync_task.await?; + let node2 = join_handle.await; + let current_block_header = node2.chain().current_header(); + assert_eq!(branch.current_header().id(), target.target_id.id()); + assert_eq!(target.target_id.id(), current_block_header.id()); + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + + Arc::get_mut(&mut arc_node1).unwrap().produce_block(20)?; + + let (sender_1, receiver_1) = unbounded(); + let (sender_2, _receiver_2) = unbounded(); + //sync again + let target = arc_node1.sync_target(); + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_header.id(), + target.clone(), + false, + net2.time_service(), + storage.clone(), + sender_1, + arc_node1.clone(), + sender_2, + DummyNetworkService::default(), + 15, + None, + None, + dag, + )?; + let join_handle = node2.process_block_connect_event(receiver_1).await; + let branch = sync_task.await?; + let node2 = join_handle.await; + let current_block_header = node2.chain().current_header(); + assert_eq!(branch.current_header().id(), target.target_id.id()); + assert_eq!(target.target_id.id(), current_block_header.id()); + + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + + Ok(()) +} + +// #[cfg(test)] +// pub async fn generate_red_dag_block() -> Result { +// let net = 
ChainNetwork::new_builtin(BuiltinNetworkID::Test); +// let mut node = SyncNodeMocker::new(net, 300, 0)?; +// node.produce_block(10)?; +// let block = node.produce_block(1)?; +// Ok(block) +// } diff --git a/sync/src/tasks/tests.rs b/sync/src/tasks/tests.rs index 06206f227e..64b3a266c2 100644 --- a/sync/src/tasks/tests.rs +++ b/sync/src/tasks/tests.rs @@ -9,8 +9,8 @@ use crate::tasks::{ BlockCollector, BlockFetcher, BlockLocalStore, BlockSyncTask, FindAncestorTask, SyncFetcher, }; use crate::verified_rpc_client::RpcVerifyError; -use anyhow::Context; use anyhow::{format_err, Result}; +use anyhow::{Context, Ok}; use futures::channel::mpsc::unbounded; use futures::future::BoxFuture; use futures::FutureExt; @@ -25,10 +25,12 @@ use starcoin_chain_api::ChainReader; use starcoin_chain_mock::MockChain; use starcoin_config::{BuiltinNetworkID, ChainNetwork}; use starcoin_crypto::HashValue; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; -use starcoin_storage::BlockStore; +use starcoin_storage::{BlockStore, Storage}; use starcoin_sync_api::SyncTarget; +use starcoin_types::block::BlockNumber; use starcoin_types::{ block::{Block, BlockBody, BlockHeaderBuilder, BlockIdAndNumber, BlockInfo}, U256, @@ -40,97 +42,26 @@ use stream_task::{ }; use test_helper::DummyNetworkService; +use super::test_tools::{full_sync_new_node, SyncTestSystem}; +use super::BlockConnectedEvent; + #[stest::test(timeout = 120)] pub async fn test_full_sync_new_node() -> Result<()> { - let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; - node1.produce_block(10)?; - - let mut arc_node1 = Arc::new(node1); - - let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - - let node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; - - let target = arc_node1.sync_target(); - - let current_block_header = node2.chain().current_header(); - - let storage = node2.chain().get_storage(); - let (sender_1, receiver_1) = unbounded(); - let (sender_2, _receiver_2) = unbounded(); - let (sync_task, _task_handle, task_event_counter) = full_sync_task( - current_block_header.id(), - target.clone(), - false, - net2.time_service(), - storage.clone(), - sender_1, - arc_node1.clone(), - sender_2, - DummyNetworkService::default(), - 15, - None, - None, - )?; - let join_handle = node2.process_block_connect_event(receiver_1).await; - let branch = sync_task.await?; - let node2 = join_handle.await; - let current_block_header = node2.chain().current_header(); - assert_eq!(branch.current_header().id(), target.target_id.id()); - assert_eq!(target.target_id.id(), current_block_header.id()); - let reports = task_event_counter.get_reports(); - reports - .iter() - .for_each(|report| debug!("reports: {}", report)); - - Arc::get_mut(&mut arc_node1).unwrap().produce_block(20)?; - - let (sender_1, receiver_1) = unbounded(); - let (sender_2, _receiver_2) = unbounded(); - //sync again - let target = arc_node1.sync_target(); - let (sync_task, _task_handle, task_event_counter) = full_sync_task( - current_block_header.id(), - target.clone(), - false, - net2.time_service(), - storage.clone(), - sender_1, - arc_node1.clone(), - sender_2, - DummyNetworkService::default(), - 15, - None, - None, - )?; - let join_handle = node2.process_block_connect_event(receiver_1).await; - let branch = sync_task.await?; - let node2 = join_handle.await; - let current_block_header = node2.chain().current_header(); - assert_eq!(branch.current_header().id(), target.target_id.id()); - 
assert_eq!(target.target_id.id(), current_block_header.id()); - - let reports = task_event_counter.get_reports(); - reports - .iter() - .for_each(|report| debug!("reports: {}", report)); - - Ok(()) + full_sync_new_node().await } #[stest::test] pub async fn test_sync_invalid_target() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 0)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 0)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let node2 = SyncNodeMocker::new(net2.clone(), 1, 0)?; - + let node2 = SyncNodeMocker::new(net2.clone(), 300, 0)?; + let dag = node2.chain().dag(); let mut target = arc_node1.sync_target(); target.block_info.total_difficulty = U256::max_value(); @@ -153,6 +84,7 @@ pub async fn test_sync_invalid_target() -> Result<()> { 15, None, None, + dag, )?; let _join_handle = node2.process_block_connect_event(receiver_1).await; let sync_result = sync_task.await; @@ -174,14 +106,16 @@ pub async fn test_sync_invalid_target() -> Result<()> { #[stest::test] pub async fn test_failed_block() -> Result<()> { let net = ChainNetwork::new_builtin(BuiltinNetworkID::Halley); - let (storage, chain_info, _) = Genesis::init_storage_for_test(&net)?; + let (storage, chain_info, _, dag) = Genesis::init_storage_for_test(&net)?; let chain = BlockChain::new( net.time_service(), chain_info.head().id(), storage.clone(), None, + dag, )?; + let fetcher = MockBlockFetcher::new(); let (sender, _) = unbounded(); let chain_status = chain.status(); let target = SyncTarget { @@ -196,6 +130,8 @@ pub async fn test_failed_block() -> Result<()> { sender, DummyNetworkService::default(), true, + storage.clone(), + Arc::new(fetcher), ); let header = BlockHeaderBuilder::random().with_number(1).build(); let body = BlockBody::new(Vec::new(), None); @@ -212,19 +148,19 @@ pub async fn test_failed_block() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_fork() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 0)?; node1.produce_block(10)?; let mut arc_node1 = Arc::new(node1); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let node2 = SyncNodeMocker::new(net2.clone(), 300, 0)?; let target = arc_node1.sync_target(); let current_block_header = node2.chain().current_header(); - + let dag = node2.chain().dag(); let storage = node2.chain().get_storage(); let (sender, receiver) = unbounded(); let (sender_2, _receiver_2) = unbounded(); @@ -241,6 +177,7 @@ pub async fn test_full_sync_fork() -> Result<()> { 15, None, None, + dag.clone(), )?; let join_handle = node2.process_block_connect_event(receiver).await; let branch = sync_task.await?; @@ -274,6 +211,7 @@ pub async fn test_full_sync_fork() -> Result<()> { 15, None, None, + dag, )?; let join_handle = node2.process_block_connect_event(receiver).await; let branch = sync_task.await?; @@ -292,7 +230,7 @@ pub async fn test_full_sync_fork() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_fork_from_genesis() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 0)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); @@ -300,13 +238,13 @@ pub async 
fn test_full_sync_fork_from_genesis() -> Result<()> { let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); //fork from genesis - let mut node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let mut node2 = SyncNodeMocker::new(net2.clone(), 300, 0)?; node2.produce_block(5)?; let target = arc_node1.sync_target(); let current_block_header = node2.chain().current_header(); - + let dag = node2.chain().dag(); let storage = node2.chain().get_storage(); let (sender, receiver) = unbounded(); let (sender_2, _receiver_2) = unbounded(); @@ -323,6 +261,7 @@ pub async fn test_full_sync_fork_from_genesis() -> Result<()> { 15, None, None, + dag, )?; let join_handle = node2.process_block_connect_event(receiver).await; let branch = sync_task.await?; @@ -344,16 +283,15 @@ pub async fn test_full_sync_fork_from_genesis() -> Result<()> { #[stest::test(timeout = 120)] pub async fn test_full_sync_continue() -> Result<()> { - let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 10, 50)?; + // let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let test_system = SyncTestSystem::initialize_sync_system().await?; + let mut node1 = test_system.target_node; // SyncNodeMocker::new(net1, 10, 50)?; + let dag = node1.chain().dag(); node1.produce_block(10)?; - let arc_node1 = Arc::new(node1); - let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - //fork from genesis - let mut node2 = SyncNodeMocker::new(net2.clone(), 1, 50)?; + let mut node2 = test_system.local_node; // SyncNodeMocker::new(net2.clone(), 1, 50)?; node2.produce_block(7)?; // first set target to 5. @@ -377,6 +315,7 @@ pub async fn test_full_sync_continue() -> Result<()> { 15, None, None, + dag.clone(), )?; let join_handle = node2.process_block_connect_event(receiver).await; let branch = sync_task.await?; @@ -412,6 +351,7 @@ pub async fn test_full_sync_continue() -> Result<()> { 15, None, None, + dag, )?; let join_handle = node2.process_block_connect_event(receiver).await; @@ -435,7 +375,7 @@ pub async fn test_full_sync_continue() -> Result<()> { #[stest::test] pub async fn test_full_sync_cancel() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 50)?; + let mut node1 = SyncNodeMocker::new(net1, 300, 0)?; node1.produce_block(10)?; let arc_node1 = Arc::new(node1); @@ -447,7 +387,7 @@ pub async fn test_full_sync_cancel() -> Result<()> { let target = arc_node1.sync_target(); let current_block_header = node2.chain().current_header(); - + let dag = node2.chain().dag(); let storage = node2.chain().get_storage(); let (sender, receiver) = unbounded(); let (sender_2, _receiver_2) = unbounded(); @@ -464,6 +404,7 @@ pub async fn test_full_sync_cancel() -> Result<()> { 15, None, None, + dag, )?; let join_handle = node2.process_block_connect_event(receiver).await; let sync_join_handle = tokio::task::spawn(sync_task); @@ -514,13 +455,13 @@ async fn test_accumulator_sync_by_stream_task() -> Result<()> { let task_state = BlockAccumulatorSyncTask::new(info0.num_leaves, info1.clone(), fetcher, 7).unwrap(); let ancestor = BlockIdAndNumber::new(HashValue::random(), info0.num_leaves - 1); - let collector = AccumulatorCollector::new(Arc::new(store2), ancestor, info0, info1.clone()); + let collector = AccumulatorCollector::new(Arc::new(store2), ancestor, info0, info1.clone(), BlockNumber::MAX); let event_handle = Arc::new(TaskEventCounterHandle::new()); let sync_task = TaskGenerator::new( task_state, 5, 3, - 1, + 300, 
collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -556,7 +497,7 @@ pub async fn test_find_ancestor_same_number() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -596,7 +537,7 @@ pub async fn test_find_ancestor_block_number_behind() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -645,7 +586,7 @@ pub async fn test_find_ancestor_chain_fork() -> Result<()> { task_state, 5, 3, - 1, + 300, collector, event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -686,7 +627,29 @@ impl BlockFetcher for MockBlockFetcher { .iter() .map(|block_id| { if let Some(block) = blocks.get(block_id).cloned() { - Ok((block, None)) + Ok((block, Some(PeerId::random()))) + } else { + Err(format_err!("Can not find block by id: {:?}", block_id)) + } + }) + .collect(); + async { + Delay::new(Duration::from_millis(100)).await; + result + } + .boxed() + } + + fn fetch_block_headers( + &self, + block_ids: Vec, + ) -> BoxFuture)>>> { + let blocks = self.blocks.lock().unwrap(); + let result = block_ids + .iter() + .map(|block_id| { + if let Some(block) = blocks.get(block_id).cloned() { + Ok((block.id(), Some(block.header().clone()))) } else { Err(format_err!("Can not find block by id: {:?}", block_id)) } @@ -698,6 +661,33 @@ impl BlockFetcher for MockBlockFetcher { } .boxed() } + + fn fetch_dag_block_children( + &self, + block_ids: Vec, + ) -> BoxFuture>> { + let blocks = self.blocks.lock().unwrap(); + let mut result = vec![]; + block_ids.iter().for_each(|block_id| { + if let Some(block) = blocks.get(block_id).cloned() { + if let Some(hashes) = block.header().parents_hash() { + for hash in hashes { + if result.contains(&hash) { + continue; + } + result.push(hash); + } + } + } else { + info!("Can not find block by id: {:?}", block_id) + } + }); + async { + Delay::new(Duration::from_millis(100)).await; + Ok(result) + } + .boxed() + } } fn build_block_fetcher(total_blocks: u64) -> (MockBlockFetcher, MerkleAccumulator) { @@ -735,7 +725,7 @@ impl MockLocalBlockStore { ); self.store.lock().unwrap().insert( block.id(), - SyncBlockData::new(block.clone(), Some(block_info), None), + SyncBlockData::new(block.clone(), Some(block_info), Some(PeerId::random())), ); } } @@ -773,7 +763,7 @@ async fn block_sync_task_test(total_blocks: u64, ancestor_number: u64) -> Result block_sync_state, 5, 3, - 1, + 300, vec![], event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -841,7 +831,7 @@ async fn test_block_sync_with_local() -> Result<()> { block_sync_state, 5, 3, - 1, + 300, vec![], event_handle.clone(), Arc::new(DefaultCustomErrorHandle), @@ -890,7 +880,7 @@ async fn test_net_rpc_err() -> Result<()> { let target = arc_node1.sync_target(); let current_block_header = node2.chain().current_header(); - + let dag = node2.chain().dag(); let storage = node2.chain().get_storage(); let (sender, receiver) = unbounded(); let (sender_2, _receiver_2) = unbounded(); @@ -907,6 +897,7 @@ async fn test_net_rpc_err() -> Result<()> { 15, None, None, + dag, )?; let _join_handle = node2.process_block_connect_event(receiver).await; let sync_join_handle = tokio::task::spawn(sync_task); @@ -935,7 +926,7 @@ async fn test_err_context() -> Result<()> { async fn test_sync_target() { let mut peer_infos = vec![]; let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let mut node1 = SyncNodeMocker::new(net1, 1, 0).unwrap(); + let mut node1 = SyncNodeMocker::new(net1, 300, 0).unwrap(); 
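+ // note: the two trailing arguments are the mock peer's RPC delay in milliseconds and its random-error percent (cf. SyncNodeMocker::new_with_storage in mock.rs); 300/0 simulates a slow but error-free peer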
node1.produce_block(10).unwrap(); let low_chain_info = node1.peer_info().chain_info().clone(); peer_infos.push(PeerInfo::new( @@ -956,11 +947,12 @@ async fn test_sync_target() { )); let net2 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); - let (_, genesis_chain_info, _) = + let (_, genesis_chain_info, _, _) = Genesis::init_storage_for_test(&net2).expect("init storage by genesis fail."); let mock_chain = MockChain::new_with_chain( net2, node1.chain().fork(high_chain_info.head().id()).unwrap(), + node1.get_storage(), ) .unwrap(); @@ -968,7 +960,7 @@ async fn test_sync_target() { let node2 = Arc::new(SyncNodeMocker::new_with_chain_selector( PeerId::random(), mock_chain, - 1, + 300, 0, peer_selector, )); @@ -984,3 +976,232 @@ async fn test_sync_target() { assert_eq!(target.target_id.number(), low_chain_info.head().number()); assert_eq!(target.target_id.id(), low_chain_info.head().id()); } + +fn sync_block_in_async_connection( + mut target_node: Arc, + local_node: Arc, + storage: Arc, + block_count: u64, + dag: BlockDAG, +) -> Result> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + let target = target_node.sync_target(); + let target_id = target.target_id.id(); + + let (sender, mut receiver) = futures::channel::mpsc::unbounded::(); + let thread_local_node = local_node.clone(); + + let inner_dag = dag.clone(); + let process_block = move || { + let mut chain = MockChain::new_with_storage( + thread_local_node.chain_mocker.net().clone(), + storage.clone(), + thread_local_node.chain_mocker.head().status().head.id(), + thread_local_node.chain_mocker.miner().clone(), + inner_dag, + ) + .unwrap(); + loop { + if let std::result::Result::Ok(result) = receiver.try_next() { + match result { + Some(event) => { + chain + .select_head(event.block) + .expect("select head must be successful"); + if event.feedback.is_some() { + event + .feedback + .unwrap() + .unbounded_send(super::BlockConnectedFinishEvent) + .unwrap(); + assert_eq!(target_id, chain.head().status().head.id()); + break; + } + } + None => break, + } + } + } + }; + let handle = std::thread::spawn(process_block); + + let current_block_header = local_node.chain().current_header(); + let storage = local_node.chain().get_storage(); + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_header.id(), + target.clone(), + false, + local_net.time_service(), + storage.clone(), + sender, + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + dag, + )?; + let branch = async_std::task::block_on(sync_task)?; + assert_eq!(branch.current_header().id(), target.target_id.id()); + + handle.join().unwrap(); + + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + + Ok(target_node) +} + +#[stest::test] +async fn test_sync_block_in_async_connection() -> Result<()> { + let _net = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let test_system = SyncTestSystem::initialize_sync_system().await?; + let mut target_node = Arc::new(test_system.target_node); + + // let (storage, chain_info, _, _) = + // Genesis::init_storage_for_test(&net).expect("init storage by genesis fail."); + + let local_node = Arc::new(test_system.local_node); + + // let dag_storage = starcoin_dag::consensusdb::prelude::FlexiDagStorage::create_from_path( + // Path::new("."), + // 
FlexiDagStorageConfig::new(), + // )?; + // let dag = starcoin_dag::blockdag::BlockDAG::new(8, dag_storage); + + target_node = sync_block_in_async_connection( + target_node, + local_node.clone(), + local_node.chain_mocker.get_storage(), + 10, + local_node.chain().dag(), + )?; + _ = sync_block_in_async_connection( + target_node, + local_node.clone(), + local_node.chain_mocker.get_storage(), + 20, + local_node.chain().dag(), + )?; + + Ok(()) +} + +// #[cfg(test)] +// async fn sync_dag_chain( +// mut target_node: Arc, +// local_node: Arc, +// registry: &ServiceRef, +// ) -> Result<()> { +// Arc::get_mut(&mut target_node) +// .unwrap() +// .produce_block_and_create_dag(21)?; +// Ok(()) + +// let flexidag_service = registry.service_ref::().await?; +// let local_dag_accumulator_info = flexidag_service.send(GetDagAccumulatorInfo).await??.ok_or(anyhow!("dag accumulator is none"))?; + +// let result = sync_dag_full_task( +// local_dag_accumulator_info, +// target_accumulator_info, +// target_node.clone(), +// accumulator_store, +// accumulator_snapshot, +// local_store, +// local_net.time_service(), +// None, +// connector_service, +// network, +// false, +// dag, +// block_chain_service, +// flexidag_service, +// local_net.id().clone(), +// )?; + +// Ok(result) +// } + +// #[cfg(test)] +// async fn sync_dag_block_from_single_chain( +// mut target_node: Arc, +// local_node: Arc, +// registry: &ServiceRef, +// block_count: u64, +// ) -> Result> { +// use starcoin_consensus::BlockDAG; + +// Arc::get_mut(&mut target_node) +// .unwrap() +// .produce_block(block_count)?; +// loop { +// let target = target_node.sync_target(); + +// let storage = local_node.chain().get_storage(); +// let startup_info = storage +// .get_startup_info()? +// .ok_or_else(|| format_err!("Startup info should exist."))?; +// let current_block_id = startup_info.main; + +// let local_net = local_node.chain_mocker.net(); +// let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + +// let block_chain_service = async_std::task::block_on( +// registry.service_ref::>(), +// )?; + +// let (sync_task, _task_handle, task_event_counter) = if local_node.chain().head_block().block.header().number() +// > BlockDAG::dag_fork_height_with_net(local_net.id().clone()) { + +// } else { +// full_sync_task( +// current_block_id, +// target.clone(), +// false, +// local_net.time_service(), +// storage.clone(), +// block_chain_service, +// target_node.clone(), +// local_ancestor_sender, +// DummyNetworkService::default(), +// 15, +// ChainNetworkID::TEST, +// None, +// None, +// )? +// }; + +// let branch = sync_task.await?; +// info!("checking branch in sync service is the same as target's branch"); +// assert_eq!(branch.current_header().id(), target.target_id.id()); + +// let block_connector_service = registry +// .service_ref::>() +// .await? 
+// .clone(); +// let result = block_connector_service +// .send(CheckBlockConnectorHashValue { +// head_hash: target.target_id.id(), +// number: target.target_id.number(), +// }) +// .await?; +// if result.is_ok() { +// break; +// } +// let reports = task_event_counter.get_reports(); +// reports +// .iter() +// .for_each(|report| debug!("reports: {}", report)); +// } + +// Ok(target_node) +// } diff --git a/sync/src/tasks/tests_dag.rs b/sync/src/tasks/tests_dag.rs new file mode 100644 index 0000000000..88c26a0159 --- /dev/null +++ b/sync/src/tasks/tests_dag.rs @@ -0,0 +1,193 @@ +use crate::{ + block_connector::{BlockConnectorService, CheckBlockConnectorHashValue, CreateBlockRequest}, + tasks::full_sync_task, +}; +use std::sync::Arc; + +use super::mock::SyncNodeMocker; +use super::test_tools::full_sync_new_node; +use anyhow::{format_err, Result}; +use futures::channel::mpsc::unbounded; +use starcoin_account_api::AccountInfo; +use starcoin_chain_api::{message::ChainResponse, ChainReader}; +use starcoin_chain_service::ChainReaderService; +use starcoin_logger::prelude::*; +use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceRef}; +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::BlockHeader; +use test_helper::DummyNetworkService; + +#[stest::test(timeout = 120)] +pub async fn test_full_sync_new_node_dag() { + starcoin_types::block::set_test_flexidag_fork_height(10); + full_sync_new_node() + .await + .expect("dag full sync should success"); + starcoin_types::block::reset_test_custom_fork_height(); +} + +async fn sync_block_process( + target_node: Arc, + local_node: Arc, + registry: &ServiceRef, +) -> Result<(Arc, Arc)> { + loop { + let target = target_node.sync_target(); + + let storage = local_node.chain().get_storage(); + let startup_info = storage + .get_startup_info()? + .ok_or_else(|| format_err!("Startup info should exist."))?; + let current_block_id = startup_info.main; + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let block_chain_service = async_std::task::block_on( + registry.service_ref::>(), + )?; + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_id, + target.clone(), + false, + local_net.time_service(), + storage.clone(), + block_chain_service, + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + local_node.chain().dag().clone(), + )?; + let branch = sync_task.await?; + info!("checking branch in sync service is the same as target's branch"); + assert_eq!(branch.current_header().id(), target.target_id.id()); + + let block_connector_service = registry + .service_ref::>() + .await? 
+ .clone(); + let result = block_connector_service + .send(CheckBlockConnectorHashValue { + head_hash: target.target_id.id(), + number: target.target_id.number(), + }) + .await?; + if result.is_ok() { + break; + } + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + } + + Ok((local_node, target_node)) +} + +async fn sync_block_in_block_connection_service_mock( + mut target_node: Arc, + local_node: Arc, + registry: &ServiceRef, + block_count: u64, +) -> Result<(Arc, Arc)> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + sync_block_process(target_node, local_node, registry).await +} + +#[stest::test(timeout = 600)] +async fn test_sync_single_chain_to_dag_chain() -> Result<()> { + starcoin_types::block::set_test_flexidag_fork_height(10); + let test_system = super::test_tools::SyncTestSystem::initialize_sync_system().await?; + let (_local_node, _target_node) = sync_block_in_block_connection_service_mock( + Arc::new(test_system.target_node), + Arc::new(test_system.local_node), + &test_system.registry, + 40, + ) + .await?; + starcoin_types::block::reset_test_custom_fork_height(); + Ok(()) +} + +// a stub for generating red (non-selected) dag blocks; commented out until it is implemented and wired into test_sync_red_blocks_dag below +// fn create_red_blocks(header: BlockHeader) -> Result { +// } + +#[stest::test(timeout = 600)] +async fn test_sync_red_blocks_dag() -> Result<()> { + starcoin_types::block::set_test_flexidag_fork_height(10); + let test_system = super::test_tools::SyncTestSystem::initialize_sync_system() + .await + .expect("failed to init system"); + let mut target_node = Arc::new(test_system.target_node); + let local_node = Arc::new(test_system.local_node); + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(10) + .expect("failed to produce block"); + let dag_genesis_header = target_node.chain().status().head; + assert!( + dag_genesis_header.number() == 10, + "dag genesis header number should be 10, but {}", + dag_genesis_header.number() + ); + + let (local_node, mut target_node) = + sync_block_process(target_node, local_node, &test_system.registry).await?; + + // the blocks following the 10th block will be blue dag blocks + let block_connect_service = test_system + .registry + .service_ref::>() + .await?; + let miner_info = AccountInfo::random(); + block_connect_service + .send(CreateBlockRequest { + count: 3, + author: *miner_info.address(), + parent_hash: None, + user_txns: vec![], + uncles: vec![], + block_gas_limit: None, + tips: None, + }) + .await??; + + let chain_reader_service = test_system + .registry + .service_ref::() + .await?; + match chain_reader_service + .send(starcoin_chain_api::message::ChainRequest::GetHeadChainStatus()) + .await?? 
+ { + ChainResponse::ChainStatus(chain_status) => { + debug!( + "local_node chain hash: {:?}, number: {:?}", + chain_status.head.id(), + chain_status.head.number() + ); + } + _ => { + panic!("failed to get chain status"); + } + } + + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(10) + .expect("failed to produce block"); + + sync_block_process(target_node, local_node, &test_system.registry).await?; + // // genertate the red blocks + // Arc::get_mut(&mut target_node).unwrap().produce_block_by_header(dag_genesis_header, 5).expect("failed to produce block"); + + starcoin_types::block::reset_test_custom_fork_height(); + Ok(()) +} diff --git a/sync/src/verified_rpc_client.rs b/sync/src/verified_rpc_client.rs index fc4bc6f8f5..78470600f5 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -6,6 +6,7 @@ use network_api::peer_score::{InverseScore, Score}; use network_api::PeerId; use network_api::PeerInfo; use network_api::PeerSelector; +use network_api::PeerStrategy; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::AccumulatorNode; use starcoin_crypto::hash::HashValue; @@ -15,7 +16,7 @@ use starcoin_network_rpc_api::{ GetBlockIds, GetTxnsWithHash, RawRpcClient, }; use starcoin_state_tree::StateNode; -use starcoin_types::block::Block; +use starcoin_types::block::{Block, LegacyBlock}; use starcoin_types::transaction::{SignedUserTransaction, Transaction}; use starcoin_types::{ block::{BlockHeader, BlockInfo, BlockNumber}, @@ -99,6 +100,7 @@ static G_BLOCK_BODY_VERIFIER: fn(&HashValue, &BlockBody) -> bool = static G_BLOCK_INFO_VERIFIER: fn(&HashValue, &BlockInfo) -> bool = |block_id, block_info| -> bool { *block_id == block_info.block_id }; +static G_RPC_RETRY_COUNT: i32 = 20; /// Enhancement RpcClient, for verify rpc response by request and auto select peer. #[derive(Clone)] pub struct VerifiedRpcClient { @@ -123,6 +125,10 @@ impl VerifiedRpcClient { } } + pub fn switch_strategy(&mut self, strategy: PeerStrategy) { + self.peer_selector.switch_strategy(strategy) + } + pub fn selector(&self) -> &PeerSelector { &self.peer_selector } @@ -145,6 +151,45 @@ impl VerifiedRpcClient { .ok_or_else(|| format_err!("No peers for send request.")) } + async fn get_txns_with_hash_from_pool_inner( + &self, + peer_id: PeerId, + req: GetTxnsWithHash, + ) -> Result>> { + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_txns_with_hash_from_pool(peer_id.clone(), req.clone()) + .await + { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get txns with hash from pool from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get txns with hash from pool from peer : {:?}.", + peer_id, + ), + ) + .into()) + } + pub async fn get_txns_with_hash_from_pool( &self, peer_id: Option, @@ -156,8 +201,7 @@ impl VerifiedRpcClient { self.select_a_peer()? 
}; let data = self - .client - .get_txns_with_hash_from_pool(peer_id.clone(), req.clone()) + .get_txns_with_hash_from_pool_inner(peer_id.clone(), req.clone()) .await?; if data.len() == req.len() { let mut none_txn_vec = Vec::new(); @@ -195,13 +239,45 @@ impl VerifiedRpcClient { } } + async fn get_txns_inner( + &self, + peer_id: PeerId, + req: GetTxnsWithHash, + ) -> Result>> { + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self.client.get_txns(peer_id.clone(), req.clone()).await { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get txns from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get txns from peer : {:?}.", peer_id,), + ) + .into()) + } + pub async fn get_txns( &self, peer_id: Option, req: GetTxnsWithHash, ) -> Result<(Vec, Vec)> { let peer_id = peer_id.unwrap_or(self.select_a_peer()?); - let data = self.client.get_txns(peer_id.clone(), req.clone()).await?; + let data = self.get_txns_inner(peer_id.clone(), req.clone()).await?; if data.len() == req.len() { let mut none_txn_vec = Vec::new(); let mut verified_txns: Vec = Vec::new(); @@ -243,10 +319,31 @@ impl VerifiedRpcClient { block_id: HashValue, ) -> Result<(PeerId, Option>)> { let peer_id = self.select_a_peer()?; - Ok(( + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self.client.get_txn_infos(peer_id.clone(), block_id).await { + Ok(result) => return Ok((peer_id, result)), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get txn infos from peer : {:?}. 
error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( peer_id.clone(), - self.client.get_txn_infos(peer_id, block_id).await?, - )) + format!("failed to get txn infos from peer : {:?}.", peer_id,), + ) + .into()) } pub async fn get_headers_by_number( @@ -254,12 +351,37 @@ impl VerifiedRpcClient { req: GetBlockHeadersByNumber, ) -> Result>> { let peer_id = self.select_a_peer()?; - let resp: Vec> = self - .client - .get_headers_by_number(peer_id.clone(), req.clone()) - .await?; - let resp = G_BLOCK_NUMBER_VERIFIER.verify(peer_id, req, resp)?; - Ok(resp) + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_headers_by_number(peer_id.clone(), req.clone()) + .await + { + Ok(result) => { + return Ok(G_BLOCK_NUMBER_VERIFIER.verify(peer_id, req, result)?); + } + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block headers from peer : {:?}., error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block headers from peer : {:?}.", peer_id,), + ) + .into()) } pub async fn get_headers_by_hash( @@ -267,12 +389,37 @@ impl VerifiedRpcClient { req: Vec, ) -> Result>> { let peer_id = self.select_a_peer()?; - let resp: Vec> = self - .client - .get_headers_by_hash(peer_id.clone(), req.clone()) - .await?; - let resp = G_BLOCK_ID_VERIFIER.verify(peer_id, req, resp)?; - Ok(resp) + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_headers_by_hash(peer_id.clone(), req.clone()) + .await + { + Ok(result) => { + return Ok(G_BLOCK_ID_VERIFIER.verify(peer_id, req, result)?); + } + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block headers from peer : {:?}., error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block headers from peer : {:?}.", peer_id,), + ) + .into()) } pub async fn get_bodies_by_hash( @@ -281,12 +428,40 @@ impl VerifiedRpcClient { ) -> Result<(Vec>, PeerId)> { let peer_id = self.select_a_peer()?; debug!("rpc select peer {}", &peer_id); - let resp: Vec> = self - .client - .get_bodies_by_hash(peer_id.clone(), req.clone()) - .await?; - let resp = G_BLOCK_BODY_VERIFIER.verify(peer_id.clone(), req, resp)?; - Ok((resp, peer_id)) + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_bodies_by_hash(peer_id.clone(), req.clone()) + .await + { + Ok(result) => { + return Ok(( + G_BLOCK_BODY_VERIFIER.verify(peer_id.clone(), req, result)?, + peer_id, + )); + } + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block bodies from peer : {:?}. 
error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block bodies from peer : {:?}.", peer_id,), + ) + .into()) } pub async fn get_block_infos(&self, hashes: Vec) -> Result>> { @@ -302,12 +477,38 @@ impl VerifiedRpcClient { None => self.select_a_peer()?, Some(p) => p, }; - let resp = self - .client - .get_block_infos(peer_id.clone(), req.clone()) - .await?; - let resp = G_BLOCK_INFO_VERIFIER.verify(peer_id, req, resp)?; - Ok(resp) + + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_block_infos(peer_id.clone(), req.clone()) + .await + { + Ok(result) => { + return Ok(G_BLOCK_INFO_VERIFIER.verify(peer_id, req, result)?); + } + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block infos from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block infos from peer : {:?}.", peer_id,), + ) + .into()) } pub async fn get_state_node_by_node_hash( @@ -315,12 +516,74 @@ impl VerifiedRpcClient { node_key: HashValue, ) -> Result<(PeerId, Option)> { let peer_id = self.select_a_peer()?; - Ok(( + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_state_node_by_node_hash(peer_id.clone(), node_key) + .await + { + Ok(result) => return Ok((peer_id, result)), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get state node by node hash from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( peer_id.clone(), - self.client - .get_state_node_by_node_hash(peer_id, node_key) - .await?, - )) + format!( + "failed to get state node by node hash from peer : {:?}", + peer_id, + ), + ) + .into()) + } + + async fn get_accumulator_node_by_node_hash_inner( + &self, + peer_id: PeerId, + req: GetAccumulatorNodeByNodeHash, + ) -> Result> { + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_accumulator_node_by_node_hash(peer_id.clone(), req.clone()) + .await + { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get accumulator node by node hash inner from peer : {:?}. 
error: {:?}", peer_id, e), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get accumulator node by node hash inner from peer : {:?}.", + peer_id + ), + ) + .into()) } pub async fn get_accumulator_node_by_node_hash( @@ -330,8 +593,7 @@ impl VerifiedRpcClient { ) -> Result<(PeerId, AccumulatorNode)> { let peer_id = self.select_a_peer()?; if let Some(accumulator_node) = self - .client - .get_accumulator_node_by_node_hash( + .get_accumulator_node_by_node_hash_inner( peer_id.clone(), GetAccumulatorNodeByNodeHash { node_hash: node_key, @@ -374,7 +636,139 @@ impl VerifiedRpcClient { reverse, max_size, }; - self.client.get_block_ids(peer_id, request).await + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_block_ids(peer_id.clone(), request.clone()) + .await + { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block ids from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block ids from peer : {:?}.", peer_id), + ) + .into()) + } + + pub async fn get_block_headers_by_hash( + &self, + ids: Vec, + ) -> Result)>> { + let mut count = 0; + let peer_id = self.select_a_peer()?; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_headers_by_hash(peer_id.clone(), ids.clone()) + .await + { + Ok(result) => return Ok(ids.into_iter().zip(result.into_iter()).collect()), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get block headers from peer : {:?}., error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get block headers from peer : {:?}.", peer_id), + ) + .into()) + } + + async fn get_blocks_inner( + &self, + peer_id: PeerId, + ids: Vec, + ) -> Result>> { + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self.client.get_blocks(peer_id.clone(), ids.clone()).await { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get legacy blocks from peer : {:?}. error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get legacy blocks from peer : {:?}.", peer_id), + ) + .into()) + } + + async fn get_blocks_v1_inner( + &self, + peer_id: PeerId, + ids: Vec, + ) -> Result>> { + let mut count = 0; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_blocks_v1(peer_id.clone(), ids.clone()) + .await + { + Ok(result) => return Ok(result), + Err(e) => { + count = count.saturating_add(1); + if count == G_RPC_RETRY_COUNT { + return Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get blocks v1 from peer : {:?}. 
error: {:?}", + peer_id, e + ), + ) + .into()); + } + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!("failed to get blocks from peer : {:?}.", peer_id), + ) + .into()) } pub async fn get_blocks( @@ -383,8 +777,18 @@ impl VerifiedRpcClient { ) -> Result)>>> { let peer_id = self.select_a_peer()?; let start_time = Instant::now(); - let blocks: Vec> = - self.client.get_blocks(peer_id.clone(), ids.clone()).await?; + let blocks = match self.get_blocks_v1_inner(peer_id.clone(), ids.clone()).await { + Ok(blocks) => blocks, + Err(err) => { + warn!("get blocks failed:{}, call get blocks legacy", err); + self.get_blocks_inner(peer_id.clone(), ids.clone()) + .await? + .into_iter() + .map(|opt_block| opt_block.map(Into::into)) + .collect() + } + }; + let time = (Instant::now() .saturating_duration_since(start_time) .as_millis()) as u32; @@ -411,4 +815,30 @@ impl VerifiedRpcClient { }) .collect()) } + + pub async fn get_dag_block_children(&self, req: Vec) -> Result> { + let mut count = 0; + let peer_id = self.select_a_peer()?; + while count < G_RPC_RETRY_COUNT { + match self + .client + .get_dag_block_children(peer_id.clone(), req.clone()) + .await + { + Ok(result) => return Ok(result), + Err(_) => { + count = count.saturating_add(1); + continue; + } + } + } + Err(RpcVerifyError::new( + peer_id.clone(), + format!( + "failed to get dag block children from peer : {:?}.", + peer_id + ), + ) + .into()) + } } diff --git a/sync/tests/common_test_sync_libs.rs b/sync/tests/common_test_sync_libs.rs new file mode 100644 index 0000000000..ceb56826c2 --- /dev/null +++ b/sync/tests/common_test_sync_libs.rs @@ -0,0 +1,88 @@ + +use forkable_jellyfish_merkle::node_type::Node; +use starcoin_config::*; +use starcoin_node::NodeHandle; +use std::sync::Arc; +use network_api::PeerId; +use starcoin_crypto::HashValue; +use starcoin_types::block::BlockHeader; +use anyhow::{Ok, Result}; +use starcoin_logger::prelude::*; + +#[derive(Debug, Clone)] +pub struct DagBlockInfo { + pub header: BlockHeader, + pub children: Vec, +} + +pub fn gen_chain_env(config: NodeConfig) -> Result { + test_helper::run_node_by_config(Arc::new(config)) +} + +fn gen_node(seeds: Vec) -> Result<(NodeHandle, NetworkConfig)> { + let dir = match temp_dir() { + starcoin_config::DataDirPath::PathBuf(path) => path, + starcoin_config::DataDirPath::TempPath(path) => { + path.path().to_path_buf() + } + }; + let mut config = NodeConfig::proxima_for_test(dir); + let net_addr = config.network.self_address(); + debug!("Local node address: {:?}", net_addr); + + config.network.seeds = seeds.into_iter().map(|other_network_config| { + other_network_config.self_address() + }).collect::>().into(); + let network_config = config.network.clone(); + let handle = test_helper::run_node_by_config(Arc::new(config))?; + + Ok((handle, network_config)) +} + +pub fn init_multiple_node(count: usize) -> Result> { + let mut result = vec![]; + result.reserve(count); + let (main_node, network_config) = gen_node(vec![])?; + result.push(main_node); + for _ in 1..count { + result.push(gen_node(vec![network_config.clone()])?.0); + } + Ok(result) +} + +pub fn generate_dag_block(handle: &NodeHandle, count: usize) -> Result> { + let mut result = vec![]; + let dag = handle.get_dag()?; + while result.len() < count { + let block = handle.generate_block()?; + if block.header().is_dag() { + result.push(block); + } + } + Ok(result + .into_iter() + .map(|block| DagBlockInfo { + header: block.header().clone(), + children: dag.get_children(block.header().id()).unwrap(), + }) 
+ .collect::>()) +} + +pub fn init_two_node() -> Result<(NodeHandle, NodeHandle, PeerId)> { + // network1 initialization + let (local_handle, local_net_addr) = { + let local_config = NodeConfig::random_for_test(); + let net_addr = local_config.network.self_address(); + debug!("Local node address: {:?}", net_addr); + (gen_chain_env(local_config).unwrap(), net_addr) + }; + + // network2 initialization + let (target_handle, target_peer_id) = { + let mut target_config = NodeConfig::random_for_test(); + target_config.network.seeds = vec![local_net_addr].into(); + let target_peer_id = target_config.network.self_peer_id(); + (gen_chain_env(target_config).unwrap(), target_peer_id) + }; + Ok((local_handle, target_handle, target_peer_id)) +} \ No newline at end of file diff --git a/sync/tests/full_sync_test.rs b/sync/tests/full_sync_test.rs index 2d6ba0a6cf..1468398a01 100644 --- a/sync/tests/full_sync_test.rs +++ b/sync/tests/full_sync_test.rs @@ -1,17 +1,24 @@ mod test_sync; +mod common_test_sync_libs; use futures::executor::block_on; +use network_api::PeerId; use rand::random; use starcoin_chain_api::ChainAsyncService; -use starcoin_config::NodeConfig; +use starcoin_chain_service::ChainReaderService; +use starcoin_config::{temp_dir, NodeConfig}; +use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; use starcoin_node::NodeHandle; -use starcoin_service_registry::ActorService; +use starcoin_service_registry::{ActorService, ServiceRef}; use starcoin_sync::sync::SyncService; +use starcoin_vm_types::on_chain_config::ConfigID; use std::sync::Arc; use std::thread::sleep; use std::time::Duration; use test_helper::run_node_by_config; +use anyhow::{Ok, Result}; +use starcoin_logger::prelude::*; #[stest::test(timeout = 120)] fn test_full_sync() { @@ -129,3 +136,49 @@ fn wait_two_node_synced(first_node: &NodeHandle, second_node: &NodeHandle) { } } } + +async fn check_synced(target_hash: HashValue, chain_service: ServiceRef) -> Result { + loop { + if target_hash == chain_service.main_head_block().await.expect("failed to get main head block").id() { + debug!("succeed to sync main block id: {:?}", target_hash); + break; + } else { + debug!("waiting for sync, now sleep 60 second"); + async_std::task::sleep(Duration::from_secs(60)).await; + } + } + Ok(true) +} + +#[stest::test(timeout = 120)] +fn test_multiple_node_sync() { + let nodes = common_test_sync_libs::init_multiple_node(5).expect("failed to initialize multiple nodes"); + + let main_node = &nodes.first().expect("failed to get main node"); + + let _ = common_test_sync_libs::generate_dag_block(main_node, 20).expect("failed to generate dag block"); + let main_node_chain_service = main_node.chain_service().expect("failed to get main node chain service"); + let chain_service_1 = nodes[1].chain_service().expect("failed to get the chain service"); + let chain_service_2 = nodes[2].chain_service().expect("failed to get the chain service"); + let chain_service_3 = nodes[3].chain_service().expect("failed to get the chain service"); + let chain_service_4 = nodes[4].chain_service().expect("failed to get the chain service"); + + block_on(async move { + let main_block = main_node_chain_service.main_head_block().await.expect("failed to get main head block"); + + nodes[1].start_to_sync().await.expect("failed to start to sync"); + nodes[2].start_to_sync().await.expect("failed to start to sync"); + nodes[3].start_to_sync().await.expect("failed to start to sync"); + nodes[4].start_to_sync().await.expect("failed to start to sync"); + + check_synced(main_block.id(), 
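+        // note: check_synced (defined above) polls the head every 60s, and this test
+        // runs under a 120s stest timeout, so each node gets at most two polls.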
chain_service_1).await.expect("failed to check sync");
+        check_synced(main_block.id(), chain_service_2).await.expect("failed to check sync");
+        check_synced(main_block.id(), chain_service_3).await.expect("failed to check sync");
+        check_synced(main_block.id(), chain_service_4).await.expect("failed to check sync");
+
+        // close
+        nodes.into_iter().for_each(|handle| {
+            handle.stop().expect("failed to shutdown the node normally!");
+        });
+    });
+}
\ No newline at end of file
diff --git a/sync/tests/test_rpc_client.rs b/sync/tests/test_rpc_client.rs
new file mode 100644
index 0000000000..66aea614f5
--- /dev/null
+++ b/sync/tests/test_rpc_client.rs
@@ -0,0 +1,100 @@
+// Copyright (c) The Starcoin Core Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+mod common_test_sync_libs;
+
+use anyhow::{Ok, Result};
+use futures::executor::block_on;
+use network_api::{PeerId, PeerProvider, PeerSelector, PeerStrategy};
+use starcoin_config::*;
+use starcoin_crypto::HashValue;
+use starcoin_logger::prelude::*;
+use starcoin_node::NodeHandle;
+use starcoin_sync::verified_rpc_client::VerifiedRpcClient;
+use starcoin_types::block::{BlockHeader, TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG};
+use std::sync::Arc;
+
+#[derive(Debug, Clone)]
+struct DagBlockInfo {
+    pub header: BlockHeader,
+    pub children: Vec,
+}
+
+#[stest::test]
+fn test_verified_client_for_dag() {
+    let (local_handle, target_handle, target_peer_id) =
+        init_two_node().expect("failed to initialize the local and target node");
+
+    let network = local_handle.network();
+    // PeerProvider
+    let peer_info = block_on(network.get_peer(target_peer_id))
+        .expect("failed to get peer info")
+        .expect("failed to get peer info: it is none");
+    let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None);
+    let rpc_client = VerifiedRpcClient::new(peer_selector, network);
+    // testing dag rpc
+    let target_dag_blocks =
+        generate_dag_block(&target_handle, 5).expect("failed to generate dag block");
+    target_dag_blocks.into_iter().for_each(|target_dag_block| {
+        let dag_children_from_client_rpc =
+            block_on(rpc_client.get_dag_block_children(vec![target_dag_block.header.id()]))
+                .expect("failed to get dag block children");
+        info!(
+            "get dag children for:{},{:?}",
+            target_dag_block.header.id(),
+            dag_children_from_client_rpc
+        );
+        assert!(target_dag_block
+            .clone()
+            .children
+            .into_iter()
+            .all(|child| { dag_children_from_client_rpc.contains(&child) }));
+
+        assert!(dag_children_from_client_rpc
+            .into_iter()
+            .all(|child| { target_dag_block.children.contains(&child) }));
+    });
+    target_handle.stop().unwrap();
+    local_handle.stop().unwrap();
+}
+
+fn init_two_node() -> Result<(NodeHandle, NodeHandle, PeerId)> {
+    // network1 initialization
+    let (local_handle, local_net_addr) = {
+        let local_config = NodeConfig::random_for_test();
+        let net_addr = local_config.network.self_address();
+        debug!("Local node address: {:?}", net_addr);
+        (gen_chain_env(local_config).unwrap(), net_addr)
+    };
+
+    // network2 initialization
+    let (target_handle, target_peer_id) = {
+        let mut target_config = NodeConfig::random_for_test();
+        target_config.network.seeds = vec![local_net_addr].into();
+        let target_peer_id = target_config.network.self_peer_id();
+        (gen_chain_env(target_config).unwrap(), target_peer_id)
+    };
+    Ok((local_handle, target_handle, target_peer_id))
+}
+
+fn generate_dag_block(handle: &NodeHandle, count: usize) -> Result> {
+    let mut result = vec![];
+    let dag = handle.get_dag()?;
+    while result.len() < count {
+        let (block, is_dag_block)
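+        // Note: generate_block() is destructured here as a (block, is_dag_block)
+        // pair, while the otherwise identical helper in common_test_sync_libs.rs
+        // binds a single Block and checks block.header().is_dag(); only one of the
+        // two signatures can match the actual NodeHandle API.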
= handle.generate_block()?; + if is_dag_block { + result.push(block); + } + } + Ok(result + .into_iter() + .map(|block| DagBlockInfo { + header: block.header().clone(), + children: dag.get_children(block.header().id()).unwrap(), + }) + .collect::>()) +} + +fn gen_chain_env(config: NodeConfig) -> Result { + test_helper::run_node_by_config(Arc::new(config)) +} diff --git a/test-helper/Cargo.toml b/test-helper/Cargo.toml index 4bc37d0425..2a40eddfa0 100644 --- a/test-helper/Cargo.toml +++ b/test-helper/Cargo.toml @@ -49,7 +49,7 @@ stdlib = { workspace = true } thiserror = { workspace = true } tokio = { features = ["full"], workspace = true } move-ir-compiler = { workspace = true } - +starcoin-dag = { workspace = true } [dev-dependencies] stest = { workspace = true } diff --git a/test-helper/data/Block/data b/test-helper/data/Block/data index babd4a9926..db42659a9a 100644 --- a/test-helper/data/Block/data +++ b/test-helper/data/Block/data @@ -1 +1 @@ -20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff00000000000000000000 \ No newline at end of file +20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff0000000000000000000000 \ No newline at end of file diff --git a/test-helper/data/Block/hash b/test-helper/data/Block/hash index 2037d065b3..3647a114f0 100644 --- a/test-helper/data/Block/hash +++ b/test-helper/data/Block/hash @@ -1 +1 @@ -a1a1f34b7bafb294895f852420e9553bd552881bc89f2ea7b71e84757bcbab44 \ No newline at end of file +d48ca588ba3ff1b72504371001e31908f3ca6457281322d6434181b794971173 \ No newline at end of file diff --git a/test-helper/data/Block/json b/test-helper/data/Block/json index 91f6fbdf6a..7e8cb7b4ef 100644 --- a/test-helper/data/Block/json +++ b/test-helper/data/Block/json @@ -15,7 +15,8 @@ "id": 255 }, "nonce": 0, - "extra": "0x00000000" + "extra": "0x00000000", + "parents_hash": null }, "body": { "transactions": [], diff --git a/test-helper/data/BlockHeader/data b/test-helper/data/BlockHeader/data index 019172e904..e7312424da 100644 --- a/test-helper/data/BlockHeader/data +++ b/test-helper/data/BlockHeader/data @@ -1 +1 @@ -20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff0000000000000000 \ No newline at end of file 
+20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff000000000000000000 \ No newline at end of file diff --git a/test-helper/data/BlockHeader/hash b/test-helper/data/BlockHeader/hash index 0b24635b9d..273d44bb3b 100644 --- a/test-helper/data/BlockHeader/hash +++ b/test-helper/data/BlockHeader/hash @@ -1 +1 @@ -772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb \ No newline at end of file +7889ee492fd2b8da082978d1543b52ea2065fe280137ae4d6da0475655c02be2 \ No newline at end of file diff --git a/test-helper/data/BlockHeader/json b/test-helper/data/BlockHeader/json index ffffd14658..f4d7077410 100644 --- a/test-helper/data/BlockHeader/json +++ b/test-helper/data/BlockHeader/json @@ -14,5 +14,6 @@ "id": 255 }, "nonce": 0, - "extra": "0x00000000" + "extra": "0x00000000", + "parents_hash": null } \ No newline at end of file diff --git a/test-helper/data/BlockMetadata/data b/test-helper/data/BlockMetadata/data index 68cfad2845..02f505bc96 100644 --- a/test-helper/data/BlockMetadata/data +++ b/test-helper/data/BlockMetadata/data @@ -1 +1 @@ -2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000ff0000000000000000 \ No newline at end of file +2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000ff00000000000000000100 \ No newline at end of file diff --git a/test-helper/data/BlockMetadata/hash b/test-helper/data/BlockMetadata/hash index c79ea8ca99..65c3c0b642 100644 --- a/test-helper/data/BlockMetadata/hash +++ b/test-helper/data/BlockMetadata/hash @@ -1 +1 @@ -dd06255ab50b0cf641e5612472d4c71e5339709bf7aaacfeba51bc5b9bafd55d \ No newline at end of file +a6882a425b763ee19b08587d205af69ef27c763a5a22f2d1c3fd698b0658d59b \ No newline at end of file diff --git a/test-helper/data/BlockMetadata/json b/test-helper/data/BlockMetadata/json index a1735e31cb..f2cfb53b48 100644 --- a/test-helper/data/BlockMetadata/json +++ b/test-helper/data/BlockMetadata/json @@ -8,5 +8,6 @@ "chain_id": { "id": 255 }, - "parent_gas_used": 0 + "parent_gas_used": 0, + "parents_hash": [] } \ No newline at end of file diff --git a/test-helper/data/ChainStatus/data b/test-helper/data/ChainStatus/data index 7b0a152de5..1da184ed38 100644 --- a/test-helper/data/ChainStatus/data +++ b/test-helper/data/ChainStatus/data @@ -1 +1 @@ 
-20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff000000000000000020772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb000000000000000000000000000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f48415348000000000000000000000000000000000000000000 \ No newline at end of file +20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff00000000000000000020772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb000000000000000000000000000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f48415348000000000000000000000000000000000000000000 \ No newline at end of file diff --git a/test-helper/data/ChainStatus/json b/test-helper/data/ChainStatus/json index 63bb179969..0bef1b5e93 100644 --- a/test-helper/data/ChainStatus/json +++ b/test-helper/data/ChainStatus/json @@ -15,7 +15,8 @@ "id": 255 }, "nonce": 0, - "extra": "0x00000000" + "extra": "0x00000000", + "parents_hash": null }, "info": { "block_id": "0x772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb", diff --git a/test-helper/data/CompactBlock/data b/test-helper/data/CompactBlock/data index db42659a9a..142d3aad0b 100644 --- a/test-helper/data/CompactBlock/data +++ b/test-helper/data/CompactBlock/data @@ -1 +1 @@ -20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff0000000000000000000000 \ No newline at end of file +20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff000000000000000000000000 \ No newline at end of file diff --git a/test-helper/data/CompactBlock/json b/test-helper/data/CompactBlock/json index ba37ffe2d1..c0e506c02e 100644 --- 
a/test-helper/data/CompactBlock/json +++ b/test-helper/data/CompactBlock/json @@ -15,7 +15,8 @@ "id": 255 }, "nonce": 0, - "extra": "0x00000000" + "extra": "0x00000000", + "parents_hash": null }, "short_ids": [], "prefilled_txn": [], diff --git a/test-helper/data/CompactBlockMessage/data b/test-helper/data/CompactBlockMessage/data index 191c6406c4..8de281dcc7 100644 --- a/test-helper/data/CompactBlockMessage/data +++ b/test-helper/data/CompactBlockMessage/data @@ -1 +1 @@ -20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff000000000000000000000020772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb000000000000000000000000000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f48415348000000000000000000000000000000000000000000 \ No newline at end of file +20000000000000000000000000000000000000000000000000000000000000000038b710e2760100000000000000000000000000000000000000000000000000010020414343554d554c41544f525f504c414345484f4c4445525f484153480000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000205350415253455f4d45524b4c455f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000000000000000000000000000000000000000000000120c01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97ff00000000000000000000000020772acd09032fe354de7a43bda37f4b93dabede991e5fdabbd601b20834684cdb000000000000000000000000000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f4841534800000000000000000000000000000000000000000020414343554d554c41544f525f504c414345484f4c4445525f48415348000000000000000000000000000000000000000000 \ No newline at end of file diff --git a/test-helper/data/CompactBlockMessage/json b/test-helper/data/CompactBlockMessage/json index e7e0632706..8de7c3b3c9 100644 --- a/test-helper/data/CompactBlockMessage/json +++ b/test-helper/data/CompactBlockMessage/json @@ -16,7 +16,8 @@ "id": 255 }, "nonce": 0, - "extra": "0x00000000" + "extra": "0x00000000", + "parents_hash": null }, "short_ids": [], "prefilled_txn": [], diff --git a/test-helper/src/block.rs b/test-helper/src/block.rs new file mode 100644 index 0000000000..e299415247 --- /dev/null +++ b/test-helper/src/block.rs @@ -0,0 +1,17 @@ +use starcoin_chain::BlockChain; +use starcoin_consensus::Consensus; +use starcoin_types::account::Account; +use starcoin_types::block::Block; +use starcoin_vm_types::transaction::SignedUserTransaction; + +pub fn create_new_block( + chain: &BlockChain, + account: &Account, + txns: Vec, +) -> anyhow::Result { + let (template, _) = + chain.create_block_template(*account.address(), None, txns, vec![], None, None)?; + chain + .consensus() + .create_block(template, chain.time_service().as_ref()) +} diff --git a/test-helper/src/chain.rs b/test-helper/src/chain.rs index ba337c327b..da77a59995 100644 --- a/test-helper/src/chain.rs +++ b/test-helper/src/chain.rs @@ -8,12 +8,36 @@ use starcoin_chain::ChainWriter; use starcoin_config::ChainNetwork; use starcoin_consensus::Consensus; use 
starcoin_genesis::Genesis; +use starcoin_types::block::BlockNumber; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; pub fn gen_blockchain_for_test(net: &ChainNetwork) -> Result { - let (storage, chain_info, _) = - Genesis::init_storage_for_test(net).expect("init storage by genesis fail."); + let (storage, chain_info, _, dag) = + Genesis::init_storage_for_test(net, TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH) + .expect("init storage by genesis fail."); - let block_chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)?; + let block_chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + None, + dag, + )?; + Ok(block_chain) +} + +pub fn gen_blockchain_for_dag_test(net: &ChainNetwork, fork_number: BlockNumber) -> Result { + let (storage, chain_info, _, dag) = + Genesis::init_storage_for_test(net, fork_number) + .expect("init storage by genesis fail."); + + let block_chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + None, + dag, + )?; Ok(block_chain) } @@ -22,7 +46,14 @@ pub fn gen_blockchain_with_blocks_for_test(count: u64, net: &ChainNetwork) -> Re let miner_account = AccountInfo::random(); for _i in 0..count { let (block_template, _) = block_chain - .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) + .create_block_template( + *miner_account.address(), + None, + Vec::new(), + vec![], + None, + None, + ) .unwrap(); let block = block_chain .consensus() diff --git a/test-helper/src/dao.rs b/test-helper/src/dao.rs index f443106ca3..1c66721066 100644 --- a/test-helper/src/dao.rs +++ b/test-helper/src/dao.rs @@ -413,6 +413,21 @@ pub fn vote_language_version(_net: &ChainNetwork, lang_version: u64) -> ScriptFu ) } +pub fn vote_flexi_dag_config(_net: &ChainNetwork, effective_height: u64) -> ScriptFunction { + ScriptFunction::new( + ModuleId::new( + core_code_address(), + Identifier::new("OnChainConfigScripts").unwrap(), + ), + Identifier::new("propose_update_flexi_dag_effective_height").unwrap(), + vec![], + vec![ + bcs_ext::to_bytes(&effective_height).unwrap(), + bcs_ext::to_bytes(&0u64).unwrap(), + ], + ) +} + /// execute on chain config scripts pub fn execute_script_on_chain_config( _net: &ChainNetwork, diff --git a/test-helper/src/lib.rs b/test-helper/src/lib.rs index 847ae6d8c0..d59b0190fd 100644 --- a/test-helper/src/lib.rs +++ b/test-helper/src/lib.rs @@ -13,6 +13,7 @@ pub mod txn; pub mod txpool; pub use chain::gen_blockchain_for_test; +pub use chain::gen_blockchain_for_dag_test; pub use dummy_network_service::DummyNetworkService; pub use network::{build_network, build_network_cluster, build_network_pair}; pub use node::{run_node_by_config, run_test_node}; diff --git a/test-helper/src/network.rs b/test-helper/src/network.rs index 2e5faea961..3ba609a412 100644 --- a/test-helper/src/network.rs +++ b/test-helper/src/network.rs @@ -17,6 +17,7 @@ use starcoin_service_registry::{ }; use starcoin_storage::block_info::BlockInfoStore; use starcoin_storage::{BlockStore, Storage}; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; use starcoin_types::startup_info::{ChainInfo, ChainStatus}; use std::any::Any; use std::borrow::Cow; @@ -138,7 +139,8 @@ pub async fn build_network_with_config( rpc_service_mocker: Option<(RpcInfo, MockRpcHandler)>, ) -> Result { let registry = RegistryService::launch(); - let (storage, _chain_info, genesis) = Genesis::init_storage_for_test(node_config.net())?; + let (storage, _chain_info, genesis, _) = + 
Genesis::init_storage_for_test(node_config.net(), TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH)?; registry.put_shared(genesis).await?; registry.put_shared(node_config.clone()).await?; registry.put_shared(storage.clone()).await?; diff --git a/test-helper/src/starcoin_dao.rs b/test-helper/src/starcoin_dao.rs index 7be7ba0ae5..36f6f93d9f 100644 --- a/test-helper/src/starcoin_dao.rs +++ b/test-helper/src/starcoin_dao.rs @@ -415,7 +415,7 @@ fn stake_to_be_member_function( } fn block_from_metadata(block_meta: BlockMetadata, chain_state: &ChainStateDB) -> Result { - let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _) = + let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _, parents_hash) = block_meta.into_inner(); let block_body = BlockBody::new(vec![], None); let block_header = BlockHeader::new( @@ -432,6 +432,7 @@ fn block_from_metadata(block_meta: BlockMetadata, chain_state: &ChainStateDB) -> chain_state.get_chain_id()?, 0, BlockHeaderExtra::new([0u8; 4]), + parents_hash, ); Ok(Block::new(block_header, block_body)) } diff --git a/test-helper/src/txpool.rs b/test-helper/src/txpool.rs index fb1b86a15f..895874131e 100644 --- a/test-helper/src/txpool.rs +++ b/test-helper/src/txpool.rs @@ -4,15 +4,16 @@ use futures_timer::Delay; use starcoin_account_service::{AccountService, AccountStorage}; use starcoin_config::NodeConfig; +use starcoin_dag::blockdag::BlockDAG; use starcoin_genesis::Genesis; use starcoin_miner::{BlockBuilderService, MinerService}; use starcoin_service_registry::bus::BusService; use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceRef}; use starcoin_storage::Storage; use starcoin_txpool::{TxPoolActorService, TxPoolService}; +use starcoin_types::block::TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH; use std::sync::Arc; use std::time::Duration; - pub async fn start_txpool_with_size( pool_size: u64, ) -> ( @@ -21,6 +22,7 @@ pub async fn start_txpool_with_size( Arc, ServiceRef, ServiceRef, + BlockDAG, ) { start_txpool_with_miner(pool_size, false).await } @@ -34,6 +36,7 @@ pub async fn start_txpool_with_miner( Arc, ServiceRef, ServiceRef, + BlockDAG, ) { let mut config = NodeConfig::random_for_test(); config.tx_pool.set_max_count(pool_size); @@ -41,8 +44,9 @@ pub async fn start_txpool_with_miner( let node_config = Arc::new(config); - let (storage, _chain_info, _) = - Genesis::init_storage_for_test(node_config.net()).expect("init storage by genesis fail."); + let (storage, _chain_info, _, dag) = + Genesis::init_storage_for_test(node_config.net(), TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH) + .expect("init storage by genesis fail."); let registry = RegistryService::launch(); registry.put_shared(node_config.clone()).await.unwrap(); registry.put_shared(storage.clone()).await.unwrap(); @@ -68,7 +72,14 @@ pub async fn start_txpool_with_miner( Delay::new(Duration::from_millis(200)).await; let txpool_service = registry.get_shared::().await.unwrap(); - (txpool_service, storage, node_config, pool_actor, registry) + ( + txpool_service, + storage, + node_config, + pool_actor, + registry, + dag, + ) } pub async fn start_txpool() -> ( @@ -77,6 +88,7 @@ pub async fn start_txpool() -> ( Arc, ServiceRef, ServiceRef, + BlockDAG, ) { start_txpool_with_size(1000).await } diff --git a/txpool/src/test.rs b/txpool/src/test.rs index e205b388e6..35e6c55b97 100644 --- a/txpool/src/test.rs +++ b/txpool/src/test.rs @@ -56,7 +56,7 @@ impl AccountSeqNumberClient for MockNonceClient { #[stest::test] async fn test_txn_expire() -> Result<()> { - let (txpool_service, _storage, 
config, _, _) = test_helper::start_txpool().await; + let (txpool_service, _storage, config, _, _, _) = test_helper::start_txpool().await; let txn = generate_txn(config, 0); txpool_service.add_txns(vec![txn]).pop().unwrap()?; let pendings = txpool_service.get_pending_txns(None, Some(0)); @@ -70,7 +70,7 @@ async fn test_txn_expire() -> Result<()> { #[stest::test] async fn test_tx_pool() -> Result<()> { - let (txpool_service, _storage, config, _, _) = test_helper::start_txpool().await; + let (txpool_service, _storage, config, _, _, _) = test_helper::start_txpool().await; let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair(); let account_address = account_address::from_public_key(&public_key); let txn = starcoin_transaction_builder::build_transfer_from_association( @@ -103,7 +103,7 @@ async fn test_subscribe_txns() { async fn test_pool_pending() -> Result<()> { let pool_size = 5; let expect_reject = 3; - let (txpool_service, _storage, node_config, _, _) = + let (txpool_service, _storage, node_config, _, _, _) = test_helper::start_txpool_with_size(pool_size).await; let metrics_config: &MetricsConfig = &node_config.metrics; @@ -181,7 +181,7 @@ async fn test_pool_pending() -> Result<()> { #[stest::test] async fn test_rollback() -> Result<()> { - let (pool, storage, config, _, _) = test_helper::start_txpool().await; + let (pool, storage, config, _, _, _) = test_helper::start_txpool().await; let start_timestamp = 0; let retracted_txn = { let (_private_key, public_key) = KeyGen::from_os_rng().generate_keypair(); @@ -227,6 +227,8 @@ async fn test_rollback() -> Result<()> { U256::from(1024u64), config.net().genesis_config().consensus(), None, + None, + None, )?; let excluded_txns = open_block.push_txns(vec![txn])?; assert_eq!(excluded_txns.discarded_txns.len(), 0); @@ -273,7 +275,7 @@ async fn test_rollback() -> Result<()> { #[stest::test(timeout = 480)] async fn test_txpool_actor_service() { - let (_txpool_service, _storage, config, tx_pool_actor, _registry) = + let (_txpool_service, _storage, config, tx_pool_actor, _registry, _) = test_helper::start_txpool().await; let txn = generate_txn(config, 0); diff --git a/types/Cargo.toml b/types/Cargo.toml index e33c88309b..2839ed498a 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -18,6 +18,8 @@ starcoin-crypto = { workspace = true } starcoin-uint = { workspace = true } starcoin-vm-types = { workspace = true } thiserror = { workspace = true } +lazy_static= { workspace = true } +parking_lot = { workspace = true } [features] default = [] diff --git a/types/src/block/legacy.rs b/types/src/block/legacy.rs new file mode 100644 index 0000000000..44ac39cc47 --- /dev/null +++ b/types/src/block/legacy.rs @@ -0,0 +1,264 @@ +use super::{AccountAddress, BlockHeaderExtra, BlockNumber, ChainId, SignedUserTransaction, U256}; +use schemars::{self, JsonSchema}; +use serde::{Deserialize, Deserializer, Serialize}; +use starcoin_crypto::{ + hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, + HashValue, +}; +use starcoin_vm_types::transaction::authenticator::AuthenticationKey; + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)] +#[serde(rename = "BlockHeader")] +pub struct BlockHeader { + #[serde(skip)] + pub id: Option, + /// Parent hash. + parent_hash: HashValue, + /// Block timestamp. + timestamp: u64, + /// Block number. + number: BlockNumber, + /// Block author. + author: AccountAddress, + /// Block author auth key. 
+ /// this field is deprecated + author_auth_key: Option, + /// The transaction accumulator root hash after executing this block. + txn_accumulator_root: HashValue, + /// The parent block info's block accumulator root hash. + block_accumulator_root: HashValue, + /// The last transaction state_root of this block after execute. + state_root: HashValue, + /// Gas used for contracts execution. + gas_used: u64, + /// Block difficulty + #[schemars(with = "String")] + difficulty: U256, + /// hash for block body + body_hash: HashValue, + /// The chain id + chain_id: ChainId, + /// Consensus nonce field. + nonce: u32, + /// block header extra + extra: BlockHeaderExtra, +} + +impl BlockHeader { + // the author_auth_key field is deprecated, but keep this fn for compat with old block. + pub(crate) fn new_with_auth_key( + parent_hash: HashValue, + timestamp: u64, + number: BlockNumber, + author: AccountAddress, + author_auth_key: Option, + txn_accumulator_root: HashValue, + block_accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + difficulty: U256, + body_hash: HashValue, + chain_id: ChainId, + nonce: u32, + extra: BlockHeaderExtra, + ) -> BlockHeader { + let mut header = BlockHeader { + id: None, + parent_hash, + block_accumulator_root, + number, + timestamp, + author, + author_auth_key, + txn_accumulator_root, + state_root, + gas_used, + difficulty, + nonce, + body_hash, + chain_id, + extra, + }; + header.id = Some(header.crypto_hash()); + header + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn id(&self) -> HashValue { + self.id.unwrap() + } +} + +impl From for BlockHeader { + fn from(v: crate::block::BlockHeader) -> Self { + Self { + id: v.id, + parent_hash: v.parent_hash, + timestamp: v.timestamp, + number: v.number, + author: v.author, + author_auth_key: v.author_auth_key, + txn_accumulator_root: v.txn_accumulator_root, + block_accumulator_root: v.block_accumulator_root, + state_root: v.state_root, + gas_used: v.gas_used, + difficulty: v.difficulty, + body_hash: v.body_hash, + chain_id: v.chain_id, + nonce: v.nonce, + extra: v.extra, + } + } +} + +impl From for crate::block::BlockHeader { + fn from(v: BlockHeader) -> Self { + let id = v.id.or_else(|| Some(v.crypto_hash())); + Self { + id, + parent_hash: v.parent_hash, + timestamp: v.timestamp, + number: v.number, + author: v.author, + author_auth_key: v.author_auth_key, + txn_accumulator_root: v.txn_accumulator_root, + block_accumulator_root: v.block_accumulator_root, + state_root: v.state_root, + gas_used: v.gas_used, + difficulty: v.difficulty, + body_hash: v.body_hash, + chain_id: v.chain_id, + nonce: v.nonce, + extra: v.extra, + parents_hash: None, + } + } +} +impl<'de> Deserialize<'de> for BlockHeader { + fn deserialize(deserializer: D) -> Result>::Error> + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(rename = "BlockHeader")] + struct BlockHeaderData { + parent_hash: HashValue, + timestamp: u64, + number: BlockNumber, + author: AccountAddress, + author_auth_key: Option, + txn_accumulator_root: HashValue, + block_accumulator_root: HashValue, + state_root: HashValue, + gas_used: u64, + difficulty: U256, + body_hash: HashValue, + chain_id: ChainId, + nonce: u32, + extra: BlockHeaderExtra, + } + + let header_data = BlockHeaderData::deserialize(deserializer)?; + let block_header = Self::new_with_auth_key( + header_data.parent_hash, + header_data.timestamp, + header_data.number, + header_data.author, + header_data.author_auth_key, + header_data.txn_accumulator_root, + 
header_data.block_accumulator_root, + header_data.state_root, + header_data.gas_used, + header_data.difficulty, + header_data.body_hash, + header_data.chain_id, + header_data.nonce, + header_data.extra, + ); + Ok(block_header) + } +} + +#[derive( + Default, Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, +)] +pub struct BlockBody { + /// The transactions in this block. + pub transactions: Vec, + /// uncles block header + pub uncles: Option>, +} + +impl BlockBody { + pub fn hash(&self) -> HashValue { + self.crypto_hash() + } +} + +impl From for crate::block::BlockBody { + fn from(value: BlockBody) -> Self { + let BlockBody { + transactions, + uncles, + } = value; + + Self { + transactions, + uncles: uncles.map(|u| u.into_iter().map(Into::into).collect()), + } + } +} + +impl From for BlockBody { + fn from(value: crate::block::BlockBody) -> Self { + let crate::block::BlockBody { + transactions, + uncles, + } = value; + + Self { + transactions, + uncles: uncles.map(|u| u.into_iter().map(Into::into).collect()), + } + } +} + +/// A block, encoded as it is on the block chain. +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)] +pub struct Block { + /// The header of this block. + pub header: BlockHeader, + /// The body of this block. + pub body: BlockBody, +} + +impl Block { + pub fn id(&self) -> HashValue { + self.header.id() + } + + pub fn header(&self) -> &BlockHeader { + &self.header + } +} + +impl From for crate::block::Block { + fn from(value: Block) -> Self { + Self { + header: value.header.into(), + body: value.body.into(), + } + } +} + +impl From for Block { + fn from(value: crate::block::Block) -> Self { + Self { + header: value.header.into(), + body: value.body.into(), + } + } +} diff --git a/types/src/block.rs b/types/src/block/mod.rs similarity index 80% rename from types/src/block.rs rename to types/src/block/mod.rs index 45704fa069..53abb68012 100644 --- a/types/src/block.rs +++ b/types/src/block/mod.rs @@ -1,6 +1,10 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +mod legacy; +#[cfg(test)] +mod tests; + use crate::account_address::AccountAddress; use crate::block_metadata::BlockMetadata; use crate::genesis_config::{ChainId, ConsensusStrategy}; @@ -8,6 +12,9 @@ use crate::language_storage::CORE_CODE_ADDRESS; use crate::transaction::SignedUserTransaction; use crate::U256; use bcs_ext::Sample; +pub use legacy::{ + Block as LegacyBlock, BlockBody as LegacyBlockBody, BlockHeader as LegacyBlockHeader, +}; use schemars::{self, JsonSchema}; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -20,8 +27,48 @@ use starcoin_crypto::{ use starcoin_vm_types::account_config::genesis_address; use starcoin_vm_types::transaction::authenticator::AuthenticationKey; use std::fmt::Formatter; +use std::hash::Hash; + /// Type for block number. pub type BlockNumber = u64; +pub type ParentsHash = Option>; +//TODO: make sure height +pub static TEST_FLEXIDAG_FORK_HEIGHT_FOR_DAG: BlockNumber = 4; +pub static TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH: BlockNumber = 10000; +// static DEV_FLEXIDAG_FORK_HEIGHT: BlockNumber = 2; +// static PROXIMA_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; +// static HALLEY_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; +// static BARNARD_FLEXIDAG_FORK_HEIGHT: BlockNumber = 10000; +// static MAIN_FLEXIDAG_FORK_HEIGHT: BlockNumber = 1000000; + +// lazy_static! 
{ +// static ref TEST_FLEXIDAG_FORK_HEIGHT: Mutex = Mutex::new(10000); +// static ref CUSTOM_FLEXIDAG_FORK_HEIGHT: Mutex = Mutex::new(10000); +// } + +// pub fn get_test_flexidag_fork_height() -> BlockNumber { +// *TEST_FLEXIDAG_FORK_HEIGHT.lock().unwrap() +// } + +// pub fn get_custom_flexidag_fork_height() -> BlockNumber { +// *CUSTOM_FLEXIDAG_FORK_HEIGHT.lock().unwrap() +// } + +// // TODO: support a macro such as #[cfg(test:consensus=dag)] to set fork height for testing customly and reset after executing. +// pub fn set_test_flexidag_fork_height(value: BlockNumber) { +// let mut num = TEST_FLEXIDAG_FORK_HEIGHT.lock().unwrap(); +// *num = value; +// } + +// pub fn set_customm_flexidag_fork_height(value: BlockNumber) { +// let mut num = TEST_FLEXIDAG_FORK_HEIGHT.lock().unwrap(); +// *num = value; +// } + +// pub fn reset_test_custom_fork_height() { +// *TEST_FLEXIDAG_FORK_HEIGHT.lock().unwrap() = 10000; +// *CUSTOM_FLEXIDAG_FORK_HEIGHT.lock().unwrap() = 10000; +// } /// Type for block header extra #[derive(Clone, Default, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, JsonSchema)] @@ -152,6 +199,8 @@ pub struct BlockHeader { nonce: u32, /// block header extra extra: BlockHeaderExtra, + /// Parents hash. + parents_hash: ParentsHash, } impl BlockHeader { @@ -169,6 +218,7 @@ impl BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, ) -> BlockHeader { Self::new_with_auth_key( parent_hash, @@ -185,6 +235,7 @@ impl BlockHeader { chain_id, nonce, extra, + parents_hash, ) } @@ -204,6 +255,7 @@ impl BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, ) -> BlockHeader { let mut header = BlockHeader { id: None, @@ -221,8 +273,13 @@ impl BlockHeader { body_hash, chain_id, extra, + parents_hash, }; - header.id = Some(header.crypto_hash()); + header.id = Some(if header.is_legacy() { + LegacyBlockHeader::from(header.clone()).crypto_hash() + } else { + header.crypto_hash() + }); header } @@ -247,6 +304,9 @@ impl BlockHeader { self.parent_hash } + pub fn parents_hash(&self) -> ParentsHash { + self.parents_hash.clone() + } pub fn timestamp(&self) -> u64 { self.timestamp } @@ -302,6 +362,33 @@ impl BlockHeader { pub fn is_genesis(&self) -> bool { self.number == 0 } + // pub fn dag_fork_height(&self) -> BlockNumber { + // if self.chain_id.is_test() { + // get_test_flexidag_fork_height() + // } else if self.chain_id.is_halley() { + // HALLEY_FLEXIDAG_FORK_HEIGHT + // } else if self.chain_id.is_proxima() { + // PROXIMA_FLEXIDAG_FORK_HEIGHT + // } else if self.chain_id.is_barnard() { + // BARNARD_FLEXIDAG_FORK_HEIGHT + // } else if self.chain_id.is_main() { + // MAIN_FLEXIDAG_FORK_HEIGHT + // } else if self.chain_id.is_dev() { + // DEV_FLEXIDAG_FORK_HEIGHT + // } else { + // get_custom_flexidag_fork_height() + // } + // } + + // pub fn is_dag(&self) -> bool { + // self.number > self.dag_fork_height() + // } + pub fn is_legacy(&self) -> bool { + self.parents_hash.is_none() + } + // pub fn is_dag_genesis(&self) -> bool { + // self.number == self.dag_fork_height() + // } pub fn genesis_block_header( parent_hash: HashValue, @@ -326,24 +413,67 @@ impl BlockHeader { chain_id, 0, BlockHeaderExtra::default(), + None, ) } + //for test + pub fn dag_genesis_random(dag_genesis_number: BlockNumber) -> Self { + let mut header = Self::random(); + header.parents_hash = Some(vec![header.parent_hash]); + header.number = dag_genesis_number; + header + } + // Create a random compatible block header whose + // number <= fork_height + 
// parents_hash == None pub fn random() -> Self { + Self::random_with_opt(0) + } + + pub fn random_for_dag() -> Self { + Self::random_with_opt(2) + } + + // header_type: + // 0 - legacy compatible header + // 1 - upgraded but non-dag header + // 2 - dag block header + pub fn random_with_opt(header_type: u8) -> Self { + let base: u64 = TEST_FLEXIDAG_FORK_HEIGHT_NEVER_REACH + .checked_add(1) + .unwrap(); + let (number, parents_hash) = if header_type == 0 { + (rand::random::().checked_rem(base).unwrap(), None) + } else if header_type == 1 { + ( + rand::random::().checked_rem(base).unwrap(), + Some(vec![]), + ) + } else if header_type == 2 { + ( + rand::random::().checked_add(base).unwrap_or(base), + Some(vec![HashValue::random()]), + ) + } else { + panic!("Invalid header_type {header_type}") + }; + Self::new( HashValue::random(), rand::random(), - rand::random(), + number, AccountAddress::random(), HashValue::random(), HashValue::random(), HashValue::random(), rand::random(), - U256::max_value(), + rand::random::().into(), HashValue::random(), ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + parents_hash, ) } @@ -374,6 +504,7 @@ impl<'de> Deserialize<'de> for BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, } let header_data = BlockHeaderData::deserialize(deserializer)?; @@ -392,6 +523,7 @@ impl<'de> Deserialize<'de> for BlockHeader { header_data.chain_id, header_data.nonce, header_data.extra, + header_data.parents_hash, ); Ok(block_header) } @@ -413,6 +545,7 @@ impl Default for BlockHeader { ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + None, ) } } @@ -433,6 +566,7 @@ impl Sample for BlockHeader { ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + None, ) } } @@ -502,9 +636,19 @@ impl BlockHeaderBuilder { } } + pub fn random_for_dag() -> Self { + Self { + buffer: BlockHeader::random_for_dag(), + } + } + fn new_with(buffer: BlockHeader) -> Self { Self { buffer } } + pub fn with_parents_hash(mut self, parent_hash: ParentsHash) -> Self { + self.buffer.parents_hash = parent_hash; + self + } pub fn with_parent_hash(mut self, parent_hash: HashValue) -> Self { self.buffer.parent_hash = parent_hash; @@ -665,6 +809,20 @@ impl Block { } } + // pub fn is_dag(&self) -> bool { + // self.header.is_dag() + // } + pub fn is_legacy(&self) -> bool { + self.header.is_legacy() + } + // pub fn is_dag_genesis_block(&self) -> bool { + // self.header.is_dag_genesis() + // } + + pub fn parent_hash(&self) -> HashValue { + self.header.parent_hash() + } + pub fn id(&self) -> HashValue { self.header.id() } @@ -724,17 +882,38 @@ impl Block { .as_ref() .map(|uncles| uncles.len() as u64) .unwrap_or(0); + if let Some(parents_hash) = self.header.parents_hash() { + BlockMetadata::new_with_parents( + self.header.parent_hash(), + self.header.timestamp, + self.header.author, + self.header.author_auth_key, + uncles, + self.header.number, + self.header.chain_id, + parent_gas_used, + parents_hash, + ) + } else { + BlockMetadata::new( + self.header.parent_hash(), + self.header.timestamp, + self.header.author, + self.header.author_auth_key, + uncles, + self.header.number, + self.header.chain_id, + parent_gas_used, + ) + } + } - BlockMetadata::new( - self.header.parent_hash(), - self.header.timestamp, - self.header.author, - self.header.author_auth_key, - uncles, - self.header.number, - self.header.chain_id, - parent_gas_used, - ) + pub fn random() -> Self { + let body = BlockBody::sample(); + let mut header = BlockHeader::random(); + header.body_hash = body.hash(); + + Self { 
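+            // body_hash was recomputed from the sampled body above, so the random
+            // block stays internally consistent.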
@@ -863,6 +1042,8 @@ pub struct BlockTemplate {
     pub difficulty: U256,
     /// Block consensus strategy
     pub strategy: ConsensusStrategy,
+    /// Parents hash.
+    parents_hash: ParentsHash,
 }

 impl BlockTemplate {
@@ -877,7 +1058,7 @@ impl BlockTemplate {
         strategy: ConsensusStrategy,
         block_metadata: BlockMetadata,
     ) -> Self {
-        let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _) =
+        let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _, parents_hash) =
             block_metadata.into_inner();
         Self {
             parent_hash,
@@ -893,6 +1074,8 @@ impl BlockTemplate {
             chain_id,
             difficulty,
             strategy,
+            // For an upgraded binary, parents_hash should never be None.
+            parents_hash: parents_hash.or_else(|| Some(vec![])),
         }
     }

@@ -911,14 +1094,16 @@ impl BlockTemplate {
             self.chain_id,
             nonce,
             extra,
+            self.parents_hash,
         );
+
         Block {
             header,
             body: self.body,
         }
     }

-    pub fn as_raw_block_header(&self) -> RawBlockHeader {
+    fn as_raw_block_header(&self) -> RawBlockHeader {
         RawBlockHeader {
             parent_hash: self.parent_hash,
             timestamp: self.timestamp,
@@ -942,29 +1127,11 @@ impl BlockTemplate {
         let mut dh = [0u8; 32];
         raw_header.difficulty.to_big_endian(&mut dh);
         let extend_and_nonce = [0u8; 12];
-
         blob.extend_from_slice(raw_header_hash.to_vec().as_slice());
         blob.extend_from_slice(&extend_and_nonce);
         blob.extend_from_slice(&dh);
-        blob
-    }

-    pub fn into_block_header(self, nonce: u32, extra: BlockHeaderExtra) -> BlockHeader {
-        BlockHeader::new(
-            self.parent_hash,
-            self.timestamp,
-            self.number,
-            self.author,
-            self.txn_accumulator_root,
-            self.block_accumulator_root,
-            self.state_root,
-            self.gas_used,
-            self.difficulty,
-            self.body_hash,
-            self.chain_id,
-            nonce,
-            extra,
-        )
+        blob
     }
 }
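Note the asymmetry in `BlockTemplate::new` above: metadata coming from an old chain may carry no parents, but the template normalizes it with `or_else`, so every block this binary mines records `Some(...)` and hashes in the upgraded format. The defaulting rule in isolation (stand-in element type):

```rust
fn main() {
    let from_old_meta: Option<Vec<u64>> = None;           // pre-DAG metadata
    let from_new_meta: Option<Vec<u64>> = Some(vec![42]); // DAG metadata

    // None is upgraded to an empty-but-present parents list...
    assert_eq!(from_old_meta.or_else(|| Some(vec![])), Some(vec![]));
    // ...while an existing parents list passes through untouched.
    assert_eq!(from_new_meta.clone().or_else(|| Some(vec![])), from_new_meta);
}
```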
diff --git a/types/src/block/tests.rs b/types/src/block/tests.rs
new file mode 100644
index 0000000000..d0f5b82f71
--- /dev/null
+++ b/types/src/block/tests.rs
@@ -0,0 +1,159 @@
+use super::legacy::{BlockBody, BlockHeader};
+use crate::{
+    account_address::AccountAddress,
+    account_config::CORE_CODE_ADDRESS,
+    block::{BlockBody as DagBlockBody, BlockHeaderExtra},
+};
+use bcs_ext::Sample;
+use starcoin_crypto::hash::PlainCryptoHash;
+use starcoin_crypto::{ed25519::genesis_key_pair, HashValue};
+use starcoin_uint::U256;
+use starcoin_vm_types::genesis_config::ChainId;
+use starcoin_vm_types::transaction::{
+    Package, RawUserTransaction, SignedUserTransaction, TransactionPayload,
+};
+use std::str::FromStr;
+
+fn this_header() -> BlockHeader {
+    let header_id =
+        HashValue::from_str("0x85d3b70cbe4c0ccc39d28af77214303d21d2dbae32a8cf8cf8f9da50e1fe4e50")
+            .unwrap();
+    let parent_hash =
+        HashValue::from_str("0x863b7525f5404eae39c0462b572c84eaa23a5fb0728cebfe1924351b7dc54ece")
+            .unwrap();
+    let timestamp = 1703079047026u64;
+    let number = 15780908u64;
+    let author = AccountAddress::from_str("0xd9b2d56e8d20a911b2dc5929695f4ec0").unwrap();
+    // let author_auth_key = None;
+    let txn_accumulator_root =
+        HashValue::from_str("0x610e248024614f5c44bc036001809e14e32aa0b922ba2be625cc0d099d49d373")
+            .unwrap();
+    let block_accumulator_root =
+        HashValue::from_str("0xcd70b9a4f3bb71d4228f461d13b9ea438dc6c3c26f7df465ea141f5dd5bca063")
+            .unwrap();
+    let state_root =
+        HashValue::from_str("0xcbcfb2a8bdfd4a4d26ee70068a28f484a819b0220debe5820ff0a5c342f81a83")
+            .unwrap();
+    let gas_used = 0;
+    let difficulty = U256::from(162878673u64);
+    let body_hash =
+        HashValue::from_str("0xc01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97")
+            .unwrap();
+    let chain_id = ChainId::new(1);
+    let nonce = 83887534u32;
+    let extra = BlockHeaderExtra::new([205, 193, 0, 0]);
+
+    let header = BlockHeader::new_with_auth_key(
+        parent_hash,
+        timestamp,
+        number,
+        author,
+        None,
+        txn_accumulator_root,
+        block_accumulator_root,
+        state_root,
+        gas_used,
+        difficulty,
+        body_hash,
+        chain_id,
+        nonce,
+        extra,
+    );
+
+    assert_eq!(header.id.unwrap(), header_id);
+    header
+}
+
+fn this_signed_txn() -> SignedUserTransaction {
+    let txn = RawUserTransaction::new_with_default_gas_token(
+        CORE_CODE_ADDRESS,
+        0,
+        TransactionPayload::Package(Package::sample()),
+        0,
+        0,
+        1, // init to 1 to pass time check
+        ChainId::test(),
+    );
+    let (genesis_private_key, genesis_public_key) = genesis_key_pair();
+    let sign_txn = txn.sign(&genesis_private_key, genesis_public_key).unwrap();
+    sign_txn.into_inner()
+}
+
+#[test]
+fn verify_body_hash_with_uncles() {
+    let body_hash =
+        HashValue::from_str("0x00592ee74f78a848089083febe0621f45d92b70c8f5a0d4b4f6123b6b01a241b")
+            .unwrap();
+
+    let body = BlockBody {
+        transactions: vec![],
+        uncles: Some(vec![this_header()]),
+    };
+    assert_eq!(body.crypto_hash(), body_hash);
+
+    let dag_body: DagBlockBody = body.clone().into();
+    assert_ne!(body_hash, dag_body.crypto_hash());
+
+    let converted_body: BlockBody = dag_body.into();
+    assert_eq!(body.crypto_hash(), converted_body.crypto_hash());
+}
+
+#[test]
+fn verify_empty_body_hash() {
+    let empty_hash =
+        HashValue::from_str("0xc01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97")
+            .unwrap();
+    let empty_body = BlockBody {
+        transactions: vec![],
+        uncles: None,
+    };
+    assert_eq!(empty_hash, empty_body.crypto_hash());
+
+    let empty_dag_body: DagBlockBody = empty_body.clone().into();
+    assert_eq!(empty_hash, empty_dag_body.crypto_hash());
+
+    let converted_empty_body: BlockBody = empty_dag_body.into();
+    assert_eq!(empty_body.crypto_hash(), converted_empty_body.crypto_hash());
+}
+
+#[test]
+fn verify_zero_uncle_body_hash() {
+    let empty_hash =
+        HashValue::from_str("0xc01e0329de6d899348a8ef4bd51db56175b3fa0988e57c3dcec8eaf13a164d97")
+            .unwrap();
+    let body = BlockBody {
+        transactions: vec![],
+        uncles: Some(vec![]),
+    };
+
+    assert_ne!(empty_hash, body.crypto_hash());
+
+    let dag_body: DagBlockBody = body.clone().into();
+    let converted_body: BlockBody = dag_body.clone().into();
+
+    assert_eq!(body.crypto_hash(), converted_body.crypto_hash());
+    assert_eq!(body.crypto_hash(), dag_body.crypto_hash());
+}
+
+#[test]
+fn verify_empty_uncles_body_hash() {
+    let body = BlockBody {
+        transactions: vec![this_signed_txn()],
+        uncles: None,
+    };
+
+    let dag_body: DagBlockBody = body.clone().into();
+    let converted_body: BlockBody = dag_body.clone().into();
+
+    assert_eq!(body.crypto_hash(), converted_body.crypto_hash());
+    assert_eq!(body.crypto_hash(), dag_body.crypto_hash());
+}
+#[test]
+fn verify_body_and_legacybody_hash() {
+    let legacy_body = crate::block::LegacyBlockBody {
+        transactions: vec![],
+        uncles: Some(vec![this_header()]),
+    };
+    let body = crate::block::BlockBody::from(legacy_body.clone());
+    assert_ne!(legacy_body.crypto_hash(), body.crypto_hash());
+}
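`verify_zero_uncle_body_hash` above hinges on a BCS detail: an `Option` is encoded with a leading presence byte, so `Some(vec![])` and `None` produce different bytes and therefore different hashes. A toy encoder showing just that distinction (hypothetical helper, not the real `bcs` crate):

```rust
// Presence byte (0x00 absent / 0x01 present) followed by a length prefix,
// mirroring how BCS distinguishes None from Some(vec![]).
fn encode(uncles: &Option<Vec<u8>>) -> Vec<u8> {
    match uncles {
        None => vec![0x00],
        Some(v) => {
            let mut out = vec![0x01, v.len() as u8]; // small lengths only
            out.extend_from_slice(v);
            out
        }
    }
}

fn main() {
    assert_ne!(encode(&None), encode(&Some(vec![]))); // [0x00] vs [0x01, 0x00]
}
```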
diff --git a/types/src/blockhash.rs b/types/src/blockhash.rs
new file mode 100644
index 0000000000..5bc90bd78b
--- /dev/null
+++ b/types/src/blockhash.rs
@@ -0,0 +1,71 @@
+use starcoin_crypto::hash::HashValue;
+use std::collections::{HashMap, HashSet};
+
+pub const BLOCK_VERSION: u16 = 1;
+
+pub const HASH_LENGTH: usize = HashValue::LENGTH;
+
+use starcoin_uint::U256;
+use std::sync::Arc;
+
+pub type BlockHashes = Arc<Vec<HashValue>>;
+
+/// `blockhash::NONE` is a hash which is used in rare cases as the `None` block hash
+pub const NONE: [u8; HASH_LENGTH] = [0u8; HASH_LENGTH];
+
+/// `blockhash::VIRTUAL` is a special hash representing the `virtual` block.
+pub const VIRTUAL: [u8; HASH_LENGTH] = [0xff; HASH_LENGTH];
+
+/// `blockhash::ORIGIN` is a special hash representing a `virtual genesis` block.
+/// It serves as a special local block which all locally-known
+/// blocks are in its future.
+pub const ORIGIN: [u8; HASH_LENGTH] = [0xfe; HASH_LENGTH];
+
+pub trait BlockHashExtensions {
+    fn is_none(&self) -> bool;
+    fn is_virtual(&self) -> bool;
+    fn is_origin(&self) -> bool;
+}
+
+impl BlockHashExtensions for HashValue {
+    fn is_none(&self) -> bool {
+        self.eq(&HashValue::new(NONE))
+    }
+
+    fn is_virtual(&self) -> bool {
+        self.eq(&HashValue::new(VIRTUAL))
+    }
+
+    fn is_origin(&self) -> bool {
+        self.eq(&HashValue::new(ORIGIN))
+    }
+}
+
+/// Generates a unique block hash for each call to this function.
+/// To be used for test purposes only.
+pub fn new_unique() -> HashValue {
+    use std::sync::atomic::{AtomicU64, Ordering};
+    static COUNTER: AtomicU64 = AtomicU64::new(1);
+    let c = COUNTER.fetch_add(1, Ordering::Relaxed);
+    HashValue::from_u64(c)
+}
+
+pub type BlueWorkType = U256;
+
+/// The type used to represent the GHOSTDAG K parameter
+pub type KType = u16;
+
+/// Map from Block hash to K type
+pub type HashKTypeMap = std::sync::Arc<BlockHashMap<KType>>;
+
+pub type BlockHashMap<T> = HashMap<HashValue, T>;
+
+/// Same as `BlockHashMap` but a `HashSet`.
+pub type BlockHashSet = HashSet<HashValue>;
+
+pub struct ChainPath {
+    pub added: Vec<HashValue>,
+    pub removed: Vec<HashValue>,
+}
+
+pub type BlockLevel = u8;
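The `new_unique` helper above avoids a global lock by leaning on an atomic counter; `fetch_add` returns the previous value, so every call observes a distinct number. The same pattern in isolation, with a plain `u64` standing in for `HashValue`:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

static COUNTER: AtomicU64 = AtomicU64::new(1);

fn new_unique() -> u64 {
    // Relaxed ordering is enough here: uniqueness only needs atomicity,
    // not any happens-before relationship between callers.
    COUNTER.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    let a = new_unique();
    let b = new_unique();
    assert_ne!(a, b);
}
```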
diff --git a/types/src/compact_block.rs b/types/src/compact_block.rs
index 826b02aa5f..f5b01533d2 100644
--- a/types/src/compact_block.rs
+++ b/types/src/compact_block.rs
@@ -1,4 +1,4 @@
-use crate::block::{Block, BlockHeader};
+use crate::block::{Block, BlockHeader, LegacyBlockHeader};
 use crate::transaction::SignedUserTransaction;
 use bcs_ext::Sample;
 use serde::{Deserialize, Serialize};
@@ -12,6 +12,41 @@ pub struct CompactBlock {
     pub uncles: Option<Vec<BlockHeader>>,
 }

+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename = "CompactBlock")]
+pub struct LegacyCompactBlock {
+    pub header: LegacyBlockHeader,
+    pub short_ids: Vec<ShortId>,
+    pub prefilled_txn: Vec<PrefilledTxn>,
+    pub uncles: Option<Vec<LegacyBlockHeader>>,
+}
+
+impl From<LegacyCompactBlock> for CompactBlock {
+    fn from(value: LegacyCompactBlock) -> Self {
+        Self {
+            header: value.header.into(),
+            short_ids: value.short_ids,
+            prefilled_txn: value.prefilled_txn,
+            uncles: value
+                .uncles
+                .map(|u| u.into_iter().map(Into::into).collect()),
+        }
+    }
+}
+
+impl From<CompactBlock> for LegacyCompactBlock {
+    fn from(value: CompactBlock) -> Self {
+        Self {
+            header: value.header.into(),
+            short_ids: value.short_ids,
+            prefilled_txn: value.prefilled_txn,
+            uncles: value
+                .uncles
+                .map(|u| u.into_iter().map(Into::into).collect()),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
 pub struct PrefilledTxn {
     pub index: u64,
@@ -57,3 +92,60 @@ impl Sample for CompactBlock {
         Block::sample().into()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::{CompactBlock, LegacyCompactBlock, ShortId};
+    use crate::block::BlockHeader;
+    use bcs_ext::BCSCodec;
+    use starcoin_crypto::HashValue;
+
+    fn setup_data() -> (LegacyCompactBlock, CompactBlock) {
+        let header = BlockHeader::random();
+        let uncles = vec![BlockHeader::random(), BlockHeader::random()];
+        let short_ids = vec![ShortId(HashValue::random()), ShortId(HashValue::random())];
+        let legacy = LegacyCompactBlock {
+            header: header.clone().into(),
+            short_ids: short_ids.clone(),
+            prefilled_txn: vec![],
+            uncles: Some(uncles.iter().cloned().map(Into::into).collect()),
+        };
+
+        let block = CompactBlock {
+            header,
+            short_ids,
+            prefilled_txn: vec![],
+            uncles: Some(uncles),
+        };
+        (legacy, block)
+    }
+
+    #[test]
+    fn test_compact_block_converting() {
+        let (legacy, block) = setup_data();
+
+        let converted_block: CompactBlock = legacy.clone().into();
+        assert_eq!(block, converted_block);
+
+        let converted_legacy: LegacyCompactBlock = block.into();
+        assert_eq!(legacy, converted_legacy);
+    }
+
+    #[test]
+    fn test_compact_block_encode_decode() {
+        let (legacy, block) = setup_data();
+
+        // legacy format -> upgraded format
+        let legacy_raw = legacy.encode().unwrap();
+        let de_legacy = LegacyCompactBlock::decode(&legacy_raw).unwrap();
+        assert_eq!(legacy, de_legacy);
+        assert!(CompactBlock::decode(&legacy_raw).is_err());
+        let converted_block: CompactBlock = de_legacy.into();
+        assert_eq!(block, converted_block);
+
+        // upgraded format -> legacy format
+        let converted_legacy: LegacyCompactBlock = block.into();
+        let converted_legacy_raw = converted_legacy.encode().unwrap();
+        assert_eq!(legacy_raw, converted_legacy_raw);
+    }
+}
diff --git a/types/src/consensus_header.rs b/types/src/consensus_header.rs
new file mode 100644
index 0000000000..135206378b
--- /dev/null
+++ b/types/src/consensus_header.rs
@@ -0,0 +1,42 @@
+use crate::block::BlockHeader;
+use crate::blockhash::BlockLevel;
+use crate::U256;
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::{HashValue as Hash, HashValue};
+use std::sync::Arc;
+
+pub trait ConsensusHeader {
+    fn parents(&self) -> Vec<HashValue>;
+    fn difficulty(&self) -> U256;
+    fn hash(&self) -> Hash;
+    fn timestamp(&self) -> u64;
+}
+
+impl ConsensusHeader for BlockHeader {
+    fn parents(&self) -> Vec<HashValue> {
+        self.parents_hash()
+            .unwrap_or_else(|| vec![self.parent_hash()])
+    }
+    fn difficulty(&self) -> U256 {
+        self.difficulty()
+    }
+    fn hash(&self) -> Hash {
+        self.id()
+    }
+
+    fn timestamp(&self) -> u64 {
+        self.timestamp()
+    }
+}
+
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+pub struct HeaderWithBlockLevel {
+    pub header: Arc<BlockHeader>,
+    pub block_level: BlockLevel,
+}
+
+#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
+pub struct CompactHeaderData {
+    pub timestamp: u64,
+    pub difficulty: U256,
+}
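The `parents()` fallback above is what lets the rest of the consensus code treat legacy and DAG blocks uniformly: a pre-DAG header still yields a single-element parent set. The rule in isolation (`u64` stands in for `HashValue`):

```rust
fn parents(parents_hash: Option<Vec<u64>>, parent_hash: u64) -> Vec<u64> {
    // A legacy header records no DAG parents, so its sole parent is used.
    parents_hash.unwrap_or_else(|| vec![parent_hash])
}

fn main() {
    assert_eq!(parents(None, 7), vec![7]);                // legacy block
    assert_eq!(parents(Some(vec![7, 9]), 7), vec![7, 9]); // dag block
}
```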
diff --git a/types/src/lib.rs b/types/src/lib.rs
index ec49aa8bed..4535af6f98 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -104,3 +104,6 @@ pub mod sync_status;
 pub mod proof {
     pub use forkable_jellyfish_merkle::proof::SparseMerkleProof;
 }
+
+pub mod blockhash;
+pub mod consensus_header;
diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs
index d536020128..3e4c8d3368 100644
--- a/types/src/startup_info.rs
+++ b/types/src/startup_info.rs
@@ -1,7 +1,7 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0

-use crate::block::{BlockHeader, BlockInfo, BlockNumber};
+use crate::block::{BlockHeader, BlockInfo, BlockNumber, LegacyBlockHeader};
 use anyhow::Result;
 use bcs_ext::{BCSCodec, Sample};
 use schemars::JsonSchema;
@@ -13,6 +13,8 @@ use starcoin_vm_types::genesis_config::ChainId;
 use std::convert::{TryFrom, TryInto};
 use std::fmt;
 use std::fmt::Formatter;
+use std::hash::Hash;
+
 /// The info of a chain.
 #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)]
 pub struct ChainInfo {
@@ -43,15 +45,15 @@ impl ChainInfo {
     }

     pub fn update_status(&mut self, status: ChainStatus) {
-        self.status = status
+        self.status = status;
     }

     pub fn head(&self) -> &BlockHeader {
-        self.status.head()
+        &self.status.head
     }

     pub fn total_difficulty(&self) -> U256 {
-        self.status.total_difficulty()
+        self.status.info.get_total_difficulty()
     }

     pub fn into_inner(self) -> (ChainId, HashValue, ChainStatus) {
@@ -95,6 +97,47 @@ pub struct ChainStatus {
     /// Chain block info
     pub info: BlockInfo,
 }
+#[derive(Deserialize, Serialize)]
+#[serde(rename = "ChainInfo")]
+pub struct OldChainInfo {
+    chain_id: ChainId,
+    genesis_hash: HashValue,
+    status: OldChainStatus,
+}
+
+impl From<OldChainInfo> for ChainInfo {
+    fn from(value: OldChainInfo) -> Self {
+        Self {
+            chain_id: value.chain_id,
+            genesis_hash: value.genesis_hash,
+            status: value.status.into(),
+        }
+    }
+}
+#[derive(Deserialize, Serialize)]
+#[serde(rename = "ChainStatus")]
+pub struct OldChainStatus {
+    pub head: LegacyBlockHeader,
+    pub info: BlockInfo,
+}
+
+impl From<ChainStatus> for OldChainStatus {
+    fn from(value: ChainStatus) -> Self {
+        Self {
+            head: value.head.into(),
+            info: value.info,
+        }
+    }
+}
+
+impl From<OldChainStatus> for ChainStatus {
+    fn from(value: OldChainStatus) -> Self {
+        Self {
+            head: value.head.into(),
+            info: value.info,
+        }
+    }
+}

 impl ChainStatus {
     pub fn new(head: BlockHeader, info: BlockInfo) -> Self {
diff --git a/types/src/system_events.rs b/types/src/system_events.rs
index 0a84fe1a2d..138a3948c6 100644
--- a/types/src/system_events.rs
+++ b/types/src/system_events.rs
@@ -10,7 +10,10 @@ use starcoin_crypto::HashValue;
 use starcoin_vm_types::genesis_config::ConsensusStrategy;
 use std::sync::Arc;
 #[derive(Clone, Debug)]
-pub struct NewHeadBlock(pub Arc<ExecutedBlock>);
+pub struct NewHeadBlock {
+    pub executed_block: Arc<ExecutedBlock>,
+    // pub tips: Option<Vec<HashValue>>,
+}

 /// may be uncle block
 #[derive(Clone, Debug)]
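Turning `NewHeadBlock` from a tuple struct into a named struct above is a small but deliberate migration step: call sites switch from `.0` to `.executed_block` once, and future fields (such as the commented-out `tips`) can then be added without touching every destructuring site again. A stand-in sketch of the resulting access pattern (hypothetical types):

```rust
use std::sync::Arc;

#[derive(Clone, Debug)]
struct ExecutedBlock; // stand-in for the real type

// After the change: a named field, extensible without breaking pattern matches.
#[derive(Clone, Debug)]
struct NewHeadBlock {
    executed_block: Arc<ExecutedBlock>,
}

fn main() {
    let event = NewHeadBlock { executed_block: Arc::new(ExecutedBlock) };
    let _block = Arc::clone(&event.executed_block); // was event.0 before
}
```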
diff --git a/types/uint/src/lib.rs b/types/uint/src/lib.rs
index 48c8d45f6b..2e3d685772 100644
--- a/types/uint/src/lib.rs
+++ b/types/uint/src/lib.rs
@@ -7,6 +7,7 @@
 use serde::{de, ser, Deserialize, Serialize, Serializer};
 use starcoin_crypto::HashValue;
 use std::convert::TryFrom;
+use std::iter::Sum;
 use uint::*;
 construct_uint! {
     pub struct U256(4);
@@ -145,7 +146,15 @@ impl Into<HashValue> for U256 {
         HashValue::new(bytes)
     }
 }
-
+impl Sum for U256 {
+    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
+        let mut sum = U256::zero();
+        for value in iter {
+            sum += value;
+        }
+        sum
+    }
+}
 fn to_hex(bytes: &[u8], skip_leading_zero: bool) -> String {
     let bytes = if skip_leading_zero {
         let non_zero = bytes.iter().take_while(|b| **b == 0).count();
diff --git a/vm/e2e-tests/src/account_universe/bad_transaction.rs b/vm/e2e-tests/src/account_universe/bad_transaction.rs
index c8671adabd..1d3f2849ee 100644
--- a/vm/e2e-tests/src/account_universe/bad_transaction.rs
+++ b/vm/e2e-tests/src/account_universe/bad_transaction.rs
@@ -124,6 +124,7 @@ pub struct InvalidAuthkeyGen {
     #[proptest(
         strategy = "starcoin_crypto::test_utils::uniform_keypair_strategy_with_perturbation(1)"
     )]
+    #[allow(dead_code)]
     new_keypair: KeyPair<Ed25519PrivateKey, Ed25519PublicKey>,
 }

diff --git a/vm/starcoin-transactional-test-harness/Cargo.toml b/vm/starcoin-transactional-test-harness/Cargo.toml
index 2800daa0c4..3e07e195f5 100644
--- a/vm/starcoin-transactional-test-harness/Cargo.toml
+++ b/vm/starcoin-transactional-test-harness/Cargo.toml
@@ -60,6 +60,7 @@ starcoin-types = { workspace = true }
 starcoin-vm-runtime = { workspace = true }
 starcoin-vm-types = { workspace = true }
 stdlib = { workspace = true }
+starcoin-dag = { workspace = true }

 [dev-dependencies]
 datatest-stable = { workspace = true }
diff --git a/vm/starcoin-transactional-test-harness/src/fork_chain.rs b/vm/starcoin-transactional-test-harness/src/fork_chain.rs
index 9d0dda112d..0d540295cd 100644
--- a/vm/starcoin-transactional-test-harness/src/fork_chain.rs
+++ b/vm/starcoin-transactional-test-harness/src/fork_chain.rs
@@ -11,6 +11,7 @@ use starcoin_abi_decoder::decode_txn_payload;
 use starcoin_accumulator::{node::AccumulatorStoreType, Accumulator, MerkleAccumulator};
 use starcoin_config::{BuiltinNetworkID, ChainNetworkID};
 use starcoin_crypto::HashValue;
+use starcoin_dag::consensusdb::consenses_state::DagStateView;
 use starcoin_rpc_api::chain::{ChainApi, GetBlockOption};
 use starcoin_rpc_api::chain::{ChainApiClient, GetBlocksOption};
 use starcoin_rpc_api::types::{
@@ -498,6 +499,11 @@ impl ChainApi for MockChainApi {
         };
         Box::pin(fut.boxed().map_err(map_err))
     }
+
+    #[doc = r" Get the state of a dag."]
+    fn get_dag_state(&self) -> FutureResult<DagStateView> {
+        todo!("not implemented yet")
+    }
 }

 fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> {
diff --git a/vm/starcoin-transactional-test-harness/src/lib.rs b/vm/starcoin-transactional-test-harness/src/lib.rs
index 6e023aabfe..1dd9748964 100644
--- a/vm/starcoin-transactional-test-harness/src/lib.rs
+++ b/vm/starcoin-transactional-test-harness/src/lib.rs
@@ -819,7 +819,7 @@ impl<'a> StarcoinTestAdapter<'a> {
         let last_blockmeta = self
             .context
             .storage
-            .get_resource::(genesis_address())?;
+            .get_resource::(genesis_address())?;

         let height = number
             .or_else(|| last_blockmeta.as_ref().map(|b| b.number + 1))
@@ -853,7 +853,7 @@ impl<'a> StarcoinTestAdapter<'a> {
                 e
             })?;

-        let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _) =
+        let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _, parents_hash) =
             new_block_meta.clone().into_inner();
         let block_body = BlockBody::new(vec![], None);
         let block_header = BlockHeader::new(
@@ -870,6 +870,7 @@ impl<'a> StarcoinTestAdapter<'a> {
             self.context.storage.get_chain_id()?,
             0,
             BlockHeaderExtra::new([0u8; 4]),
+            parents_hash,
         );
         let new_block = Block::new(block_header, block_body);
         let mut chain =
self.context.chain.lock().unwrap(); diff --git a/vm/starcoin-transactional-test-harness/tests/cases/call_api_cmd_halley.move b/vm/starcoin-transactional-test-harness/tests/cases/call_api_cmd_halley.move.disabled similarity index 100% rename from vm/starcoin-transactional-test-harness/tests/cases/call_api_cmd_halley.move rename to vm/starcoin-transactional-test-harness/tests/cases/call_api_cmd_halley.move.disabled diff --git a/vm/stdlib/compiled/13/12-13/stdlib.blob b/vm/stdlib/compiled/13/12-13/stdlib.blob new file mode 100644 index 0000000000..1e9519997a Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib.blob differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/000_BitOperators.mv b/vm/stdlib/compiled/13/12-13/stdlib/000_BitOperators.mv new file mode 100644 index 0000000000..5def61d413 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/000_BitOperators.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/001_Debug.mv b/vm/stdlib/compiled/13/12-13/stdlib/001_Debug.mv new file mode 100644 index 0000000000..06446cdf8f Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/001_Debug.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/002_EmptyScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/002_EmptyScripts.mv new file mode 100644 index 0000000000..1f874d057c Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/002_EmptyScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/003_FromBCS.mv b/vm/stdlib/compiled/13/12-13/stdlib/003_FromBCS.mv new file mode 100644 index 0000000000..6291eb75bc Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/003_FromBCS.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/004_MintScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/004_MintScripts.mv new file mode 100644 index 0000000000..e0d82f1a77 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/004_MintScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/005_SIP_2.mv b/vm/stdlib/compiled/13/12-13/stdlib/005_SIP_2.mv new file mode 100644 index 0000000000..b495fd3d33 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/005_SIP_2.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/006_SIP_3.mv b/vm/stdlib/compiled/13/12-13/stdlib/006_SIP_3.mv new file mode 100644 index 0000000000..3885df5848 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/006_SIP_3.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/007_SignedInteger64.mv b/vm/stdlib/compiled/13/12-13/stdlib/007_SignedInteger64.mv new file mode 100644 index 0000000000..e3b62651c1 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/007_SignedInteger64.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/008_Vector.mv b/vm/stdlib/compiled/13/12-13/stdlib/008_Vector.mv new file mode 100644 index 0000000000..fca0c13f9f Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/008_Vector.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/009_Errors.mv b/vm/stdlib/compiled/13/12-13/stdlib/009_Errors.mv new file mode 100644 index 0000000000..8d51430c07 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/009_Errors.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/010_ACL.mv b/vm/stdlib/compiled/13/12-13/stdlib/010_ACL.mv new file mode 100644 index 0000000000..773134f2ee Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/010_ACL.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/011_Signer.mv b/vm/stdlib/compiled/13/12-13/stdlib/011_Signer.mv new file 
mode 100644 index 0000000000..a84a73d58a Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/011_Signer.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/012_Math.mv b/vm/stdlib/compiled/13/12-13/stdlib/012_Math.mv new file mode 100644 index 0000000000..034dad6d3e Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/012_Math.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/013_Option.mv b/vm/stdlib/compiled/13/12-13/stdlib/013_Option.mv new file mode 100644 index 0000000000..340a58f50f Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/013_Option.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/014_BCS.mv b/vm/stdlib/compiled/13/12-13/stdlib/014_BCS.mv new file mode 100644 index 0000000000..d66fd29767 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/014_BCS.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/015_Event.mv b/vm/stdlib/compiled/13/12-13/stdlib/015_Event.mv new file mode 100644 index 0000000000..a4b3a1b812 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/015_Event.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/016_Token.mv b/vm/stdlib/compiled/13/12-13/stdlib/016_Token.mv new file mode 100644 index 0000000000..4217dbb830 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/016_Token.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/017_CoreAddresses.mv b/vm/stdlib/compiled/13/12-13/stdlib/017_CoreAddresses.mv new file mode 100644 index 0000000000..8977cc4410 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/017_CoreAddresses.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/018_Timestamp.mv b/vm/stdlib/compiled/13/12-13/stdlib/018_Timestamp.mv new file mode 100644 index 0000000000..815d990752 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/018_Timestamp.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/019_Config.mv b/vm/stdlib/compiled/13/12-13/stdlib/019_Config.mv new file mode 100644 index 0000000000..5107abbab8 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/019_Config.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/020_ChainId.mv b/vm/stdlib/compiled/13/12-13/stdlib/020_ChainId.mv new file mode 100644 index 0000000000..fb643fe958 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/020_ChainId.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/021_VMConfig.mv b/vm/stdlib/compiled/13/12-13/stdlib/021_VMConfig.mv new file mode 100644 index 0000000000..d4a4038b71 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/021_VMConfig.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/022_Version.mv b/vm/stdlib/compiled/13/12-13/stdlib/022_Version.mv new file mode 100644 index 0000000000..e08ee09a6e Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/022_Version.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/023_PackageTxnManager.mv b/vm/stdlib/compiled/13/12-13/stdlib/023_PackageTxnManager.mv new file mode 100644 index 0000000000..75e05951e0 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/023_PackageTxnManager.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/024_Treasury.mv b/vm/stdlib/compiled/13/12-13/stdlib/024_Treasury.mv new file mode 100644 index 0000000000..588181223e Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/024_Treasury.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/025_Dao.mv b/vm/stdlib/compiled/13/12-13/stdlib/025_Dao.mv new file mode 100644 index 
0000000000..6f6bedf1cb Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/025_Dao.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/026_UpgradeModuleDaoProposal.mv b/vm/stdlib/compiled/13/12-13/stdlib/026_UpgradeModuleDaoProposal.mv new file mode 100644 index 0000000000..083a94d1c5 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/026_UpgradeModuleDaoProposal.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/027_TransactionTimeoutConfig.mv b/vm/stdlib/compiled/13/12-13/stdlib/027_TransactionTimeoutConfig.mv new file mode 100644 index 0000000000..f54deb348e Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/027_TransactionTimeoutConfig.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/028_TransactionPublishOption.mv b/vm/stdlib/compiled/13/12-13/stdlib/028_TransactionPublishOption.mv new file mode 100644 index 0000000000..814bd5aed1 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/028_TransactionPublishOption.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/029_RewardConfig.mv b/vm/stdlib/compiled/13/12-13/stdlib/029_RewardConfig.mv new file mode 100644 index 0000000000..9114706079 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/029_RewardConfig.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/030_OnChainConfigDao.mv b/vm/stdlib/compiled/13/12-13/stdlib/030_OnChainConfigDao.mv new file mode 100644 index 0000000000..cccbe13038 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/030_OnChainConfigDao.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/031_ModifyDaoConfigProposal.mv b/vm/stdlib/compiled/13/12-13/stdlib/031_ModifyDaoConfigProposal.mv new file mode 100644 index 0000000000..390caae299 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/031_ModifyDaoConfigProposal.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/032_ConsensusConfig.mv b/vm/stdlib/compiled/13/12-13/stdlib/032_ConsensusConfig.mv new file mode 100644 index 0000000000..0baf06dc64 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/032_ConsensusConfig.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/033_STC.mv b/vm/stdlib/compiled/13/12-13/stdlib/033_STC.mv new file mode 100644 index 0000000000..b462ed908f Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/033_STC.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/034_TransactionFee.mv b/vm/stdlib/compiled/13/12-13/stdlib/034_TransactionFee.mv new file mode 100644 index 0000000000..f209ddeb58 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/034_TransactionFee.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/035_Hash.mv b/vm/stdlib/compiled/13/12-13/stdlib/035_Hash.mv new file mode 100644 index 0000000000..67a4a8bf03 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/035_Hash.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/036_Authenticator.mv b/vm/stdlib/compiled/13/12-13/stdlib/036_Authenticator.mv new file mode 100644 index 0000000000..c5a74c5072 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/036_Authenticator.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/037_Account.mv b/vm/stdlib/compiled/13/12-13/stdlib/037_Account.mv new file mode 100644 index 0000000000..1a3465588c Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/037_Account.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/038_AccountScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/038_AccountScripts.mv new file mode 100644 
index 0000000000..3ea86f9cde Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/038_AccountScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/039_Arith.mv b/vm/stdlib/compiled/13/12-13/stdlib/039_Arith.mv new file mode 100644 index 0000000000..61d6433fab Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/039_Arith.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/040_Ring.mv b/vm/stdlib/compiled/13/12-13/stdlib/040_Ring.mv new file mode 100644 index 0000000000..870e433ee9 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/040_Ring.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/041_Block.mv b/vm/stdlib/compiled/13/12-13/stdlib/041_Block.mv new file mode 100644 index 0000000000..e72b98b179 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/041_Block.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/042_TreasuryWithdrawDaoProposal.mv b/vm/stdlib/compiled/13/12-13/stdlib/042_TreasuryWithdrawDaoProposal.mv new file mode 100644 index 0000000000..9b09f0d009 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/042_TreasuryWithdrawDaoProposal.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/043_BlockReward.mv b/vm/stdlib/compiled/13/12-13/stdlib/043_BlockReward.mv new file mode 100644 index 0000000000..f2b272971c Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/043_BlockReward.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/044_Collection.mv b/vm/stdlib/compiled/13/12-13/stdlib/044_Collection.mv new file mode 100644 index 0000000000..306c01fc8c Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/044_Collection.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/045_Collection2.mv b/vm/stdlib/compiled/13/12-13/stdlib/045_Collection2.mv new file mode 100644 index 0000000000..ab314639e1 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/045_Collection2.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/046_Compare.mv b/vm/stdlib/compiled/13/12-13/stdlib/046_Compare.mv new file mode 100644 index 0000000000..e3e7ef4a6a Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/046_Compare.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/047_ConsensusStrategy.mv b/vm/stdlib/compiled/13/12-13/stdlib/047_ConsensusStrategy.mv new file mode 100644 index 0000000000..911f9d5d76 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/047_ConsensusStrategy.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/048_DaoVoteScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/048_DaoVoteScripts.mv new file mode 100644 index 0000000000..1ad7b59e76 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/048_DaoVoteScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/049_DummyToken.mv b/vm/stdlib/compiled/13/12-13/stdlib/049_DummyToken.mv new file mode 100644 index 0000000000..27c9e6f5a2 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/049_DummyToken.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/050_DummyTokenScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/050_DummyTokenScripts.mv new file mode 100644 index 0000000000..cc64f15f0b Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/050_DummyTokenScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/051_EVMAddress.mv b/vm/stdlib/compiled/13/12-13/stdlib/051_EVMAddress.mv new file mode 100644 index 0000000000..9cb4ecebac Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/051_EVMAddress.mv 
differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/052_TypeInfo.mv b/vm/stdlib/compiled/13/12-13/stdlib/052_TypeInfo.mv new file mode 100644 index 0000000000..1be6ae9259 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/052_TypeInfo.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/053_GenesisSignerCapability.mv b/vm/stdlib/compiled/13/12-13/stdlib/053_GenesisSignerCapability.mv new file mode 100644 index 0000000000..0b2013c145 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/053_GenesisSignerCapability.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/054_Oracle.mv b/vm/stdlib/compiled/13/12-13/stdlib/054_Oracle.mv new file mode 100644 index 0000000000..348476de27 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/054_Oracle.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/055_PriceOracle.mv b/vm/stdlib/compiled/13/12-13/stdlib/055_PriceOracle.mv new file mode 100644 index 0000000000..b8584e7754 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/055_PriceOracle.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/056_EasyGas.mv b/vm/stdlib/compiled/13/12-13/stdlib/056_EasyGas.mv new file mode 100644 index 0000000000..9194df2cea Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/056_EasyGas.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/057_TransferScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/057_TransferScripts.mv new file mode 100644 index 0000000000..5a5eaf7940 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/057_TransferScripts.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/058_EasyGasScript.mv b/vm/stdlib/compiled/13/12-13/stdlib/058_EasyGasScript.mv new file mode 100644 index 0000000000..16d8f1836f Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/058_EasyGasScript.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/059_Epoch.mv b/vm/stdlib/compiled/13/12-13/stdlib/059_Epoch.mv new file mode 100644 index 0000000000..2aa602ac28 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/059_Epoch.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/060_EventUtil.mv b/vm/stdlib/compiled/13/12-13/stdlib/060_EventUtil.mv new file mode 100644 index 0000000000..bbbc6f34f8 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/060_EventUtil.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/061_FixedPoint32.mv b/vm/stdlib/compiled/13/12-13/stdlib/061_FixedPoint32.mv new file mode 100644 index 0000000000..2ecc1abb82 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/061_FixedPoint32.mv differ diff --git a/vm/stdlib/compiled/13/12-13/stdlib/062_FlexiDagConfig.mv b/vm/stdlib/compiled/13/12-13/stdlib/062_FlexiDagConfig.mv new file mode 100644 index 0000000000..243f8a06b1 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/062_FlexiDagConfig.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/062_GasSchedule.mv b/vm/stdlib/compiled/13/12-13/stdlib/063_GasSchedule.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/062_GasSchedule.mv rename to vm/stdlib/compiled/13/12-13/stdlib/063_GasSchedule.mv diff --git a/vm/stdlib/compiled/latest/stdlib/063_STCUSDOracle.mv b/vm/stdlib/compiled/13/12-13/stdlib/064_STCUSDOracle.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/063_STCUSDOracle.mv rename to vm/stdlib/compiled/13/12-13/stdlib/064_STCUSDOracle.mv diff --git a/vm/stdlib/compiled/latest/stdlib/064_Offer.mv 
b/vm/stdlib/compiled/13/12-13/stdlib/065_Offer.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/064_Offer.mv rename to vm/stdlib/compiled/13/12-13/stdlib/065_Offer.mv diff --git a/vm/stdlib/compiled/latest/stdlib/065_NFT.mv b/vm/stdlib/compiled/13/12-13/stdlib/066_NFT.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/065_NFT.mv rename to vm/stdlib/compiled/13/12-13/stdlib/066_NFT.mv diff --git a/vm/stdlib/compiled/latest/stdlib/066_LanguageVersion.mv b/vm/stdlib/compiled/13/12-13/stdlib/067_LanguageVersion.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/066_LanguageVersion.mv rename to vm/stdlib/compiled/13/12-13/stdlib/067_LanguageVersion.mv diff --git a/vm/stdlib/compiled/latest/stdlib/067_MerkleProof.mv b/vm/stdlib/compiled/13/12-13/stdlib/068_MerkleProof.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/067_MerkleProof.mv rename to vm/stdlib/compiled/13/12-13/stdlib/068_MerkleProof.mv diff --git a/vm/stdlib/compiled/latest/stdlib/068_MerkleNFTDistributor.mv b/vm/stdlib/compiled/13/12-13/stdlib/069_MerkleNFTDistributor.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/068_MerkleNFTDistributor.mv rename to vm/stdlib/compiled/13/12-13/stdlib/069_MerkleNFTDistributor.mv diff --git a/vm/stdlib/compiled/latest/stdlib/069_IdentifierNFT.mv b/vm/stdlib/compiled/13/12-13/stdlib/070_IdentifierNFT.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/069_IdentifierNFT.mv rename to vm/stdlib/compiled/13/12-13/stdlib/070_IdentifierNFT.mv diff --git a/vm/stdlib/compiled/latest/stdlib/070_GenesisNFT.mv b/vm/stdlib/compiled/13/12-13/stdlib/071_GenesisNFT.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/070_GenesisNFT.mv rename to vm/stdlib/compiled/13/12-13/stdlib/071_GenesisNFT.mv diff --git a/vm/stdlib/compiled/13/12-13/stdlib/072_StdlibUpgradeScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/072_StdlibUpgradeScripts.mv new file mode 100644 index 0000000000..cebf13e904 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/072_StdlibUpgradeScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/072_Genesis.mv b/vm/stdlib/compiled/13/12-13/stdlib/073_Genesis.mv similarity index 81% rename from vm/stdlib/compiled/latest/stdlib/072_Genesis.mv rename to vm/stdlib/compiled/13/12-13/stdlib/073_Genesis.mv index 78701566bc..97508566d0 100644 Binary files a/vm/stdlib/compiled/latest/stdlib/072_Genesis.mv and b/vm/stdlib/compiled/13/12-13/stdlib/073_Genesis.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/073_GenesisNFTScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/074_GenesisNFTScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/073_GenesisNFTScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/074_GenesisNFTScripts.mv diff --git a/vm/stdlib/compiled/latest/stdlib/074_IdentifierNFTScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/075_IdentifierNFTScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/074_IdentifierNFTScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/075_IdentifierNFTScripts.mv diff --git a/vm/stdlib/compiled/latest/stdlib/075_MintDaoProposal.mv b/vm/stdlib/compiled/13/12-13/stdlib/076_MintDaoProposal.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/075_MintDaoProposal.mv rename to vm/stdlib/compiled/13/12-13/stdlib/076_MintDaoProposal.mv diff --git a/vm/stdlib/compiled/latest/stdlib/076_ModuleUpgradeScripts.mv 
b/vm/stdlib/compiled/13/12-13/stdlib/077_ModuleUpgradeScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/076_ModuleUpgradeScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/077_ModuleUpgradeScripts.mv diff --git a/vm/stdlib/compiled/latest/stdlib/077_NFTGallery.mv b/vm/stdlib/compiled/13/12-13/stdlib/078_NFTGallery.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/077_NFTGallery.mv rename to vm/stdlib/compiled/13/12-13/stdlib/078_NFTGallery.mv diff --git a/vm/stdlib/compiled/latest/stdlib/078_NFTGalleryScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/079_NFTGalleryScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/078_NFTGalleryScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/079_NFTGalleryScripts.mv diff --git a/vm/stdlib/compiled/13/12-13/stdlib/080_OnChainConfigScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/080_OnChainConfigScripts.mv new file mode 100644 index 0000000000..8d847cdf25 Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/080_OnChainConfigScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/080_PriceOracleAggregator.mv b/vm/stdlib/compiled/13/12-13/stdlib/081_PriceOracleAggregator.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/080_PriceOracleAggregator.mv rename to vm/stdlib/compiled/13/12-13/stdlib/081_PriceOracleAggregator.mv diff --git a/vm/stdlib/compiled/latest/stdlib/081_PriceOracleScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/082_PriceOracleScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/081_PriceOracleScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/082_PriceOracleScripts.mv diff --git a/vm/stdlib/compiled/latest/stdlib/082_Secp256k1.mv b/vm/stdlib/compiled/13/12-13/stdlib/083_Secp256k1.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/082_Secp256k1.mv rename to vm/stdlib/compiled/13/12-13/stdlib/083_Secp256k1.mv diff --git a/vm/stdlib/compiled/latest/stdlib/083_Signature.mv b/vm/stdlib/compiled/13/12-13/stdlib/084_Signature.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/083_Signature.mv rename to vm/stdlib/compiled/13/12-13/stdlib/084_Signature.mv diff --git a/vm/stdlib/compiled/latest/stdlib/084_SharedEd25519PublicKey.mv b/vm/stdlib/compiled/13/12-13/stdlib/085_SharedEd25519PublicKey.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/084_SharedEd25519PublicKey.mv rename to vm/stdlib/compiled/13/12-13/stdlib/085_SharedEd25519PublicKey.mv diff --git a/vm/stdlib/compiled/latest/stdlib/085_SimpleMap.mv b/vm/stdlib/compiled/13/12-13/stdlib/086_SimpleMap.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/085_SimpleMap.mv rename to vm/stdlib/compiled/13/12-13/stdlib/086_SimpleMap.mv diff --git a/vm/stdlib/compiled/latest/stdlib/086_StructuredHash.mv b/vm/stdlib/compiled/13/12-13/stdlib/087_StructuredHash.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/086_StructuredHash.mv rename to vm/stdlib/compiled/13/12-13/stdlib/087_StructuredHash.mv diff --git a/vm/stdlib/compiled/latest/stdlib/087_StarcoinVerifier.mv b/vm/stdlib/compiled/13/12-13/stdlib/088_StarcoinVerifier.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/087_StarcoinVerifier.mv rename to vm/stdlib/compiled/13/12-13/stdlib/088_StarcoinVerifier.mv diff --git a/vm/stdlib/compiled/latest/stdlib/088_String.mv b/vm/stdlib/compiled/13/12-13/stdlib/089_String.mv similarity index 100% rename from 
vm/stdlib/compiled/latest/stdlib/088_String.mv rename to vm/stdlib/compiled/13/12-13/stdlib/089_String.mv diff --git a/vm/stdlib/compiled/latest/stdlib/089_Table.mv b/vm/stdlib/compiled/13/12-13/stdlib/090_Table.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/089_Table.mv rename to vm/stdlib/compiled/13/12-13/stdlib/090_Table.mv diff --git a/vm/stdlib/compiled/latest/stdlib/090_TransactionTimeout.mv b/vm/stdlib/compiled/13/12-13/stdlib/091_TransactionTimeout.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/090_TransactionTimeout.mv rename to vm/stdlib/compiled/13/12-13/stdlib/091_TransactionTimeout.mv diff --git a/vm/stdlib/compiled/13/12-13/stdlib/092_TransactionManager.mv b/vm/stdlib/compiled/13/12-13/stdlib/092_TransactionManager.mv new file mode 100644 index 0000000000..799c306a8a Binary files /dev/null and b/vm/stdlib/compiled/13/12-13/stdlib/092_TransactionManager.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/092_TreasuryScripts.mv b/vm/stdlib/compiled/13/12-13/stdlib/093_TreasuryScripts.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/092_TreasuryScripts.mv rename to vm/stdlib/compiled/13/12-13/stdlib/093_TreasuryScripts.mv diff --git a/vm/stdlib/compiled/latest/stdlib/093_U256.mv b/vm/stdlib/compiled/13/12-13/stdlib/094_U256.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/093_U256.mv rename to vm/stdlib/compiled/13/12-13/stdlib/094_U256.mv diff --git a/vm/stdlib/compiled/latest/stdlib/094_YieldFarming.mv b/vm/stdlib/compiled/13/12-13/stdlib/095_YieldFarming.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/094_YieldFarming.mv rename to vm/stdlib/compiled/13/12-13/stdlib/095_YieldFarming.mv diff --git a/vm/stdlib/compiled/latest/stdlib/095_YieldFarmingV2.mv b/vm/stdlib/compiled/13/12-13/stdlib/096_YieldFarmingV2.mv similarity index 100% rename from vm/stdlib/compiled/latest/stdlib/095_YieldFarmingV2.mv rename to vm/stdlib/compiled/13/12-13/stdlib/096_YieldFarmingV2.mv diff --git a/vm/stdlib/compiled/13/stdlib/000_BitOperators.mv b/vm/stdlib/compiled/13/stdlib/000_BitOperators.mv new file mode 100644 index 0000000000..5def61d413 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/000_BitOperators.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/001_Debug.mv b/vm/stdlib/compiled/13/stdlib/001_Debug.mv new file mode 100644 index 0000000000..06446cdf8f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/001_Debug.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/002_EmptyScripts.mv b/vm/stdlib/compiled/13/stdlib/002_EmptyScripts.mv new file mode 100644 index 0000000000..1f874d057c Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/002_EmptyScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/003_FromBCS.mv b/vm/stdlib/compiled/13/stdlib/003_FromBCS.mv new file mode 100644 index 0000000000..6291eb75bc Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/003_FromBCS.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/004_MintScripts.mv b/vm/stdlib/compiled/13/stdlib/004_MintScripts.mv new file mode 100644 index 0000000000..e0d82f1a77 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/004_MintScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/005_SIP_2.mv b/vm/stdlib/compiled/13/stdlib/005_SIP_2.mv new file mode 100644 index 0000000000..b495fd3d33 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/005_SIP_2.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/006_SIP_3.mv b/vm/stdlib/compiled/13/stdlib/006_SIP_3.mv new 
file mode 100644 index 0000000000..3885df5848 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/006_SIP_3.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/007_SignedInteger64.mv b/vm/stdlib/compiled/13/stdlib/007_SignedInteger64.mv new file mode 100644 index 0000000000..e3b62651c1 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/007_SignedInteger64.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/008_Vector.mv b/vm/stdlib/compiled/13/stdlib/008_Vector.mv new file mode 100644 index 0000000000..fca0c13f9f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/008_Vector.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/009_Errors.mv b/vm/stdlib/compiled/13/stdlib/009_Errors.mv new file mode 100644 index 0000000000..8d51430c07 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/009_Errors.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/010_ACL.mv b/vm/stdlib/compiled/13/stdlib/010_ACL.mv new file mode 100644 index 0000000000..773134f2ee Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/010_ACL.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/011_Signer.mv b/vm/stdlib/compiled/13/stdlib/011_Signer.mv new file mode 100644 index 0000000000..a84a73d58a Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/011_Signer.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/012_Math.mv b/vm/stdlib/compiled/13/stdlib/012_Math.mv new file mode 100644 index 0000000000..034dad6d3e Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/012_Math.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/013_Option.mv b/vm/stdlib/compiled/13/stdlib/013_Option.mv new file mode 100644 index 0000000000..340a58f50f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/013_Option.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/014_BCS.mv b/vm/stdlib/compiled/13/stdlib/014_BCS.mv new file mode 100644 index 0000000000..d66fd29767 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/014_BCS.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/015_Event.mv b/vm/stdlib/compiled/13/stdlib/015_Event.mv new file mode 100644 index 0000000000..a4b3a1b812 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/015_Event.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/016_Token.mv b/vm/stdlib/compiled/13/stdlib/016_Token.mv new file mode 100644 index 0000000000..4217dbb830 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/016_Token.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/017_CoreAddresses.mv b/vm/stdlib/compiled/13/stdlib/017_CoreAddresses.mv new file mode 100644 index 0000000000..8977cc4410 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/017_CoreAddresses.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/018_Timestamp.mv b/vm/stdlib/compiled/13/stdlib/018_Timestamp.mv new file mode 100644 index 0000000000..815d990752 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/018_Timestamp.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/019_Config.mv b/vm/stdlib/compiled/13/stdlib/019_Config.mv new file mode 100644 index 0000000000..5107abbab8 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/019_Config.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/020_ChainId.mv b/vm/stdlib/compiled/13/stdlib/020_ChainId.mv new file mode 100644 index 0000000000..fb643fe958 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/020_ChainId.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/021_VMConfig.mv b/vm/stdlib/compiled/13/stdlib/021_VMConfig.mv new file mode 100644 index 0000000000..d4a4038b71 Binary files /dev/null 
and b/vm/stdlib/compiled/13/stdlib/021_VMConfig.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/022_Version.mv b/vm/stdlib/compiled/13/stdlib/022_Version.mv new file mode 100644 index 0000000000..e08ee09a6e Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/022_Version.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/023_PackageTxnManager.mv b/vm/stdlib/compiled/13/stdlib/023_PackageTxnManager.mv new file mode 100644 index 0000000000..75e05951e0 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/023_PackageTxnManager.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/024_Treasury.mv b/vm/stdlib/compiled/13/stdlib/024_Treasury.mv new file mode 100644 index 0000000000..588181223e Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/024_Treasury.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/025_Dao.mv b/vm/stdlib/compiled/13/stdlib/025_Dao.mv new file mode 100644 index 0000000000..6f6bedf1cb Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/025_Dao.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/026_UpgradeModuleDaoProposal.mv b/vm/stdlib/compiled/13/stdlib/026_UpgradeModuleDaoProposal.mv new file mode 100644 index 0000000000..083a94d1c5 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/026_UpgradeModuleDaoProposal.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/027_TransactionTimeoutConfig.mv b/vm/stdlib/compiled/13/stdlib/027_TransactionTimeoutConfig.mv new file mode 100644 index 0000000000..f54deb348e Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/027_TransactionTimeoutConfig.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/028_TransactionPublishOption.mv b/vm/stdlib/compiled/13/stdlib/028_TransactionPublishOption.mv new file mode 100644 index 0000000000..814bd5aed1 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/028_TransactionPublishOption.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/029_RewardConfig.mv b/vm/stdlib/compiled/13/stdlib/029_RewardConfig.mv new file mode 100644 index 0000000000..9114706079 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/029_RewardConfig.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/030_OnChainConfigDao.mv b/vm/stdlib/compiled/13/stdlib/030_OnChainConfigDao.mv new file mode 100644 index 0000000000..cccbe13038 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/030_OnChainConfigDao.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/031_ModifyDaoConfigProposal.mv b/vm/stdlib/compiled/13/stdlib/031_ModifyDaoConfigProposal.mv new file mode 100644 index 0000000000..390caae299 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/031_ModifyDaoConfigProposal.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/032_ConsensusConfig.mv b/vm/stdlib/compiled/13/stdlib/032_ConsensusConfig.mv new file mode 100644 index 0000000000..0baf06dc64 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/032_ConsensusConfig.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/033_STC.mv b/vm/stdlib/compiled/13/stdlib/033_STC.mv new file mode 100644 index 0000000000..b462ed908f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/033_STC.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/034_TransactionFee.mv b/vm/stdlib/compiled/13/stdlib/034_TransactionFee.mv new file mode 100644 index 0000000000..f209ddeb58 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/034_TransactionFee.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/035_Hash.mv b/vm/stdlib/compiled/13/stdlib/035_Hash.mv new file mode 100644 index 0000000000..67a4a8bf03 Binary files /dev/null 
and b/vm/stdlib/compiled/13/stdlib/035_Hash.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/036_Authenticator.mv b/vm/stdlib/compiled/13/stdlib/036_Authenticator.mv new file mode 100644 index 0000000000..c5a74c5072 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/036_Authenticator.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/037_Account.mv b/vm/stdlib/compiled/13/stdlib/037_Account.mv new file mode 100644 index 0000000000..1a3465588c Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/037_Account.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/038_AccountScripts.mv b/vm/stdlib/compiled/13/stdlib/038_AccountScripts.mv new file mode 100644 index 0000000000..3ea86f9cde Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/038_AccountScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/039_Arith.mv b/vm/stdlib/compiled/13/stdlib/039_Arith.mv new file mode 100644 index 0000000000..61d6433fab Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/039_Arith.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/040_Ring.mv b/vm/stdlib/compiled/13/stdlib/040_Ring.mv new file mode 100644 index 0000000000..870e433ee9 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/040_Ring.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/041_Block.mv b/vm/stdlib/compiled/13/stdlib/041_Block.mv new file mode 100644 index 0000000000..e72b98b179 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/041_Block.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/042_TreasuryWithdrawDaoProposal.mv b/vm/stdlib/compiled/13/stdlib/042_TreasuryWithdrawDaoProposal.mv new file mode 100644 index 0000000000..9b09f0d009 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/042_TreasuryWithdrawDaoProposal.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/043_BlockReward.mv b/vm/stdlib/compiled/13/stdlib/043_BlockReward.mv new file mode 100644 index 0000000000..f2b272971c Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/043_BlockReward.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/044_Collection.mv b/vm/stdlib/compiled/13/stdlib/044_Collection.mv new file mode 100644 index 0000000000..306c01fc8c Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/044_Collection.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/045_Collection2.mv b/vm/stdlib/compiled/13/stdlib/045_Collection2.mv new file mode 100644 index 0000000000..ab314639e1 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/045_Collection2.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/046_Compare.mv b/vm/stdlib/compiled/13/stdlib/046_Compare.mv new file mode 100644 index 0000000000..e3e7ef4a6a Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/046_Compare.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/047_ConsensusStrategy.mv b/vm/stdlib/compiled/13/stdlib/047_ConsensusStrategy.mv new file mode 100644 index 0000000000..911f9d5d76 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/047_ConsensusStrategy.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/048_DaoVoteScripts.mv b/vm/stdlib/compiled/13/stdlib/048_DaoVoteScripts.mv new file mode 100644 index 0000000000..1ad7b59e76 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/048_DaoVoteScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/049_DummyToken.mv b/vm/stdlib/compiled/13/stdlib/049_DummyToken.mv new file mode 100644 index 0000000000..27c9e6f5a2 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/049_DummyToken.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/050_DummyTokenScripts.mv 
b/vm/stdlib/compiled/13/stdlib/050_DummyTokenScripts.mv new file mode 100644 index 0000000000..cc64f15f0b Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/050_DummyTokenScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/051_EVMAddress.mv b/vm/stdlib/compiled/13/stdlib/051_EVMAddress.mv new file mode 100644 index 0000000000..9cb4ecebac Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/051_EVMAddress.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/052_TypeInfo.mv b/vm/stdlib/compiled/13/stdlib/052_TypeInfo.mv new file mode 100644 index 0000000000..1be6ae9259 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/052_TypeInfo.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/053_GenesisSignerCapability.mv b/vm/stdlib/compiled/13/stdlib/053_GenesisSignerCapability.mv new file mode 100644 index 0000000000..0b2013c145 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/053_GenesisSignerCapability.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/054_Oracle.mv b/vm/stdlib/compiled/13/stdlib/054_Oracle.mv new file mode 100644 index 0000000000..348476de27 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/054_Oracle.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/055_PriceOracle.mv b/vm/stdlib/compiled/13/stdlib/055_PriceOracle.mv new file mode 100644 index 0000000000..b8584e7754 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/055_PriceOracle.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/056_EasyGas.mv b/vm/stdlib/compiled/13/stdlib/056_EasyGas.mv new file mode 100644 index 0000000000..9194df2cea Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/056_EasyGas.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/057_TransferScripts.mv b/vm/stdlib/compiled/13/stdlib/057_TransferScripts.mv new file mode 100644 index 0000000000..5a5eaf7940 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/057_TransferScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/058_EasyGasScript.mv b/vm/stdlib/compiled/13/stdlib/058_EasyGasScript.mv new file mode 100644 index 0000000000..16d8f1836f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/058_EasyGasScript.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/059_Epoch.mv b/vm/stdlib/compiled/13/stdlib/059_Epoch.mv new file mode 100644 index 0000000000..2aa602ac28 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/059_Epoch.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/060_EventUtil.mv b/vm/stdlib/compiled/13/stdlib/060_EventUtil.mv new file mode 100644 index 0000000000..bbbc6f34f8 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/060_EventUtil.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/061_FixedPoint32.mv b/vm/stdlib/compiled/13/stdlib/061_FixedPoint32.mv new file mode 100644 index 0000000000..2ecc1abb82 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/061_FixedPoint32.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/062_FlexiDagConfig.mv b/vm/stdlib/compiled/13/stdlib/062_FlexiDagConfig.mv new file mode 100644 index 0000000000..243f8a06b1 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/062_FlexiDagConfig.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/063_GasSchedule.mv b/vm/stdlib/compiled/13/stdlib/063_GasSchedule.mv new file mode 100644 index 0000000000..a88bc854cb Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/063_GasSchedule.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/064_STCUSDOracle.mv b/vm/stdlib/compiled/13/stdlib/064_STCUSDOracle.mv new file mode 100644 index 0000000000..2a49c59909 Binary 
files /dev/null and b/vm/stdlib/compiled/13/stdlib/064_STCUSDOracle.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/065_Offer.mv b/vm/stdlib/compiled/13/stdlib/065_Offer.mv new file mode 100644 index 0000000000..297fc8eb9b Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/065_Offer.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/066_NFT.mv b/vm/stdlib/compiled/13/stdlib/066_NFT.mv new file mode 100644 index 0000000000..cf97fbd3cd Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/066_NFT.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/067_LanguageVersion.mv b/vm/stdlib/compiled/13/stdlib/067_LanguageVersion.mv new file mode 100644 index 0000000000..0c130d7222 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/067_LanguageVersion.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/068_MerkleProof.mv b/vm/stdlib/compiled/13/stdlib/068_MerkleProof.mv new file mode 100644 index 0000000000..27c43e894a Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/068_MerkleProof.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/069_MerkleNFTDistributor.mv b/vm/stdlib/compiled/13/stdlib/069_MerkleNFTDistributor.mv new file mode 100644 index 0000000000..bb8c651a7e Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/069_MerkleNFTDistributor.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/070_IdentifierNFT.mv b/vm/stdlib/compiled/13/stdlib/070_IdentifierNFT.mv new file mode 100644 index 0000000000..44d5f27272 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/070_IdentifierNFT.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/071_GenesisNFT.mv b/vm/stdlib/compiled/13/stdlib/071_GenesisNFT.mv new file mode 100644 index 0000000000..803cfbad57 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/071_GenesisNFT.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/072_StdlibUpgradeScripts.mv b/vm/stdlib/compiled/13/stdlib/072_StdlibUpgradeScripts.mv new file mode 100644 index 0000000000..cebf13e904 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/072_StdlibUpgradeScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/073_Genesis.mv b/vm/stdlib/compiled/13/stdlib/073_Genesis.mv new file mode 100644 index 0000000000..97508566d0 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/073_Genesis.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/074_GenesisNFTScripts.mv b/vm/stdlib/compiled/13/stdlib/074_GenesisNFTScripts.mv new file mode 100644 index 0000000000..fe06059a19 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/074_GenesisNFTScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/075_IdentifierNFTScripts.mv b/vm/stdlib/compiled/13/stdlib/075_IdentifierNFTScripts.mv new file mode 100644 index 0000000000..f495cd7670 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/075_IdentifierNFTScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/076_MintDaoProposal.mv b/vm/stdlib/compiled/13/stdlib/076_MintDaoProposal.mv new file mode 100644 index 0000000000..2f4e07a5c9 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/076_MintDaoProposal.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/077_ModuleUpgradeScripts.mv b/vm/stdlib/compiled/13/stdlib/077_ModuleUpgradeScripts.mv new file mode 100644 index 0000000000..f2d215e295 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/077_ModuleUpgradeScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/078_NFTGallery.mv b/vm/stdlib/compiled/13/stdlib/078_NFTGallery.mv new file mode 100644 index 0000000000..bda70d9e15 Binary files /dev/null and 
b/vm/stdlib/compiled/13/stdlib/078_NFTGallery.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/079_NFTGalleryScripts.mv b/vm/stdlib/compiled/13/stdlib/079_NFTGalleryScripts.mv new file mode 100644 index 0000000000..e9736e40d3 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/079_NFTGalleryScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/080_OnChainConfigScripts.mv b/vm/stdlib/compiled/13/stdlib/080_OnChainConfigScripts.mv new file mode 100644 index 0000000000..8d847cdf25 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/080_OnChainConfigScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/081_PriceOracleAggregator.mv b/vm/stdlib/compiled/13/stdlib/081_PriceOracleAggregator.mv new file mode 100644 index 0000000000..51dc465f66 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/081_PriceOracleAggregator.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/082_PriceOracleScripts.mv b/vm/stdlib/compiled/13/stdlib/082_PriceOracleScripts.mv new file mode 100644 index 0000000000..9fc3054e32 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/082_PriceOracleScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/083_Secp256k1.mv b/vm/stdlib/compiled/13/stdlib/083_Secp256k1.mv new file mode 100644 index 0000000000..5f0dd612f4 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/083_Secp256k1.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/084_Signature.mv b/vm/stdlib/compiled/13/stdlib/084_Signature.mv new file mode 100644 index 0000000000..e37f2baf06 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/084_Signature.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/085_SharedEd25519PublicKey.mv b/vm/stdlib/compiled/13/stdlib/085_SharedEd25519PublicKey.mv new file mode 100644 index 0000000000..aa92ddbcab Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/085_SharedEd25519PublicKey.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/086_SimpleMap.mv b/vm/stdlib/compiled/13/stdlib/086_SimpleMap.mv new file mode 100644 index 0000000000..0effeda9fd Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/086_SimpleMap.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/087_StructuredHash.mv b/vm/stdlib/compiled/13/stdlib/087_StructuredHash.mv new file mode 100644 index 0000000000..41c8858866 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/087_StructuredHash.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/088_StarcoinVerifier.mv b/vm/stdlib/compiled/13/stdlib/088_StarcoinVerifier.mv new file mode 100644 index 0000000000..684d44fe58 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/088_StarcoinVerifier.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/089_String.mv b/vm/stdlib/compiled/13/stdlib/089_String.mv new file mode 100644 index 0000000000..4b51f43735 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/089_String.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/090_Table.mv b/vm/stdlib/compiled/13/stdlib/090_Table.mv new file mode 100644 index 0000000000..297bb011c4 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/090_Table.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/091_TransactionTimeout.mv b/vm/stdlib/compiled/13/stdlib/091_TransactionTimeout.mv new file mode 100644 index 0000000000..0e53dcbf46 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/091_TransactionTimeout.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/092_TransactionManager.mv b/vm/stdlib/compiled/13/stdlib/092_TransactionManager.mv new file mode 100644 index 0000000000..799c306a8a Binary files 
/dev/null and b/vm/stdlib/compiled/13/stdlib/092_TransactionManager.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/093_TreasuryScripts.mv b/vm/stdlib/compiled/13/stdlib/093_TreasuryScripts.mv new file mode 100644 index 0000000000..23b7501a57 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/093_TreasuryScripts.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/094_U256.mv b/vm/stdlib/compiled/13/stdlib/094_U256.mv new file mode 100644 index 0000000000..5114e77f7f Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/094_U256.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/095_YieldFarming.mv b/vm/stdlib/compiled/13/stdlib/095_YieldFarming.mv new file mode 100644 index 0000000000..5095009085 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/095_YieldFarming.mv differ diff --git a/vm/stdlib/compiled/13/stdlib/096_YieldFarmingV2.mv b/vm/stdlib/compiled/13/stdlib/096_YieldFarmingV2.mv new file mode 100644 index 0000000000..e0e4f01ae5 Binary files /dev/null and b/vm/stdlib/compiled/13/stdlib/096_YieldFarmingV2.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/059_Epoch.mv b/vm/stdlib/compiled/latest/stdlib/059_Epoch.mv index 2aa602ac28..b896098b81 100644 Binary files a/vm/stdlib/compiled/latest/stdlib/059_Epoch.mv and b/vm/stdlib/compiled/latest/stdlib/059_Epoch.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/062_FlexiDagConfig.mv b/vm/stdlib/compiled/latest/stdlib/062_FlexiDagConfig.mv new file mode 100644 index 0000000000..243f8a06b1 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/062_FlexiDagConfig.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/063_GasSchedule.mv b/vm/stdlib/compiled/latest/stdlib/063_GasSchedule.mv new file mode 100644 index 0000000000..a88bc854cb Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/063_GasSchedule.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/064_STCUSDOracle.mv b/vm/stdlib/compiled/latest/stdlib/064_STCUSDOracle.mv new file mode 100644 index 0000000000..2a49c59909 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/064_STCUSDOracle.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/065_Offer.mv b/vm/stdlib/compiled/latest/stdlib/065_Offer.mv new file mode 100644 index 0000000000..297fc8eb9b Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/065_Offer.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/066_NFT.mv b/vm/stdlib/compiled/latest/stdlib/066_NFT.mv new file mode 100644 index 0000000000..cf97fbd3cd Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/066_NFT.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/067_LanguageVersion.mv b/vm/stdlib/compiled/latest/stdlib/067_LanguageVersion.mv new file mode 100644 index 0000000000..0c130d7222 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/067_LanguageVersion.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/068_MerkleProof.mv b/vm/stdlib/compiled/latest/stdlib/068_MerkleProof.mv new file mode 100644 index 0000000000..27c43e894a Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/068_MerkleProof.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/069_MerkleNFTDistributor.mv b/vm/stdlib/compiled/latest/stdlib/069_MerkleNFTDistributor.mv new file mode 100644 index 0000000000..bb8c651a7e Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/069_MerkleNFTDistributor.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/070_IdentifierNFT.mv b/vm/stdlib/compiled/latest/stdlib/070_IdentifierNFT.mv new file mode 100644 index 
0000000000..44d5f27272 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/070_IdentifierNFT.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/071_GenesisNFT.mv b/vm/stdlib/compiled/latest/stdlib/071_GenesisNFT.mv new file mode 100644 index 0000000000..803cfbad57 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/071_GenesisNFT.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/071_StdlibUpgradeScripts.mv b/vm/stdlib/compiled/latest/stdlib/071_StdlibUpgradeScripts.mv deleted file mode 100644 index b53e7f3f3f..0000000000 Binary files a/vm/stdlib/compiled/latest/stdlib/071_StdlibUpgradeScripts.mv and /dev/null differ diff --git a/vm/stdlib/compiled/latest/stdlib/072_StdlibUpgradeScripts.mv b/vm/stdlib/compiled/latest/stdlib/072_StdlibUpgradeScripts.mv new file mode 100644 index 0000000000..cebf13e904 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/072_StdlibUpgradeScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/073_Genesis.mv b/vm/stdlib/compiled/latest/stdlib/073_Genesis.mv new file mode 100644 index 0000000000..97508566d0 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/073_Genesis.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/074_GenesisNFTScripts.mv b/vm/stdlib/compiled/latest/stdlib/074_GenesisNFTScripts.mv new file mode 100644 index 0000000000..fe06059a19 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/074_GenesisNFTScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/075_IdentifierNFTScripts.mv b/vm/stdlib/compiled/latest/stdlib/075_IdentifierNFTScripts.mv new file mode 100644 index 0000000000..f495cd7670 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/075_IdentifierNFTScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/076_MintDaoProposal.mv b/vm/stdlib/compiled/latest/stdlib/076_MintDaoProposal.mv new file mode 100644 index 0000000000..2f4e07a5c9 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/076_MintDaoProposal.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/077_ModuleUpgradeScripts.mv b/vm/stdlib/compiled/latest/stdlib/077_ModuleUpgradeScripts.mv new file mode 100644 index 0000000000..f2d215e295 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/077_ModuleUpgradeScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/078_NFTGallery.mv b/vm/stdlib/compiled/latest/stdlib/078_NFTGallery.mv new file mode 100644 index 0000000000..bda70d9e15 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/078_NFTGallery.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/079_NFTGalleryScripts.mv b/vm/stdlib/compiled/latest/stdlib/079_NFTGalleryScripts.mv new file mode 100644 index 0000000000..e9736e40d3 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/079_NFTGalleryScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/079_OnChainConfigScripts.mv b/vm/stdlib/compiled/latest/stdlib/079_OnChainConfigScripts.mv deleted file mode 100644 index 0f740c0f6d..0000000000 Binary files a/vm/stdlib/compiled/latest/stdlib/079_OnChainConfigScripts.mv and /dev/null differ diff --git a/vm/stdlib/compiled/latest/stdlib/080_OnChainConfigScripts.mv b/vm/stdlib/compiled/latest/stdlib/080_OnChainConfigScripts.mv new file mode 100644 index 0000000000..8d847cdf25 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/080_OnChainConfigScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/081_PriceOracleAggregator.mv b/vm/stdlib/compiled/latest/stdlib/081_PriceOracleAggregator.mv new file mode 
100644 index 0000000000..51dc465f66 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/081_PriceOracleAggregator.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/082_PriceOracleScripts.mv b/vm/stdlib/compiled/latest/stdlib/082_PriceOracleScripts.mv new file mode 100644 index 0000000000..9fc3054e32 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/082_PriceOracleScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/083_Secp256k1.mv b/vm/stdlib/compiled/latest/stdlib/083_Secp256k1.mv new file mode 100644 index 0000000000..5f0dd612f4 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/083_Secp256k1.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/084_Signature.mv b/vm/stdlib/compiled/latest/stdlib/084_Signature.mv new file mode 100644 index 0000000000..e37f2baf06 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/084_Signature.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/085_SharedEd25519PublicKey.mv b/vm/stdlib/compiled/latest/stdlib/085_SharedEd25519PublicKey.mv new file mode 100644 index 0000000000..aa92ddbcab Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/085_SharedEd25519PublicKey.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/086_SimpleMap.mv b/vm/stdlib/compiled/latest/stdlib/086_SimpleMap.mv new file mode 100644 index 0000000000..0effeda9fd Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/086_SimpleMap.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/087_StructuredHash.mv b/vm/stdlib/compiled/latest/stdlib/087_StructuredHash.mv new file mode 100644 index 0000000000..41c8858866 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/087_StructuredHash.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/088_StarcoinVerifier.mv b/vm/stdlib/compiled/latest/stdlib/088_StarcoinVerifier.mv new file mode 100644 index 0000000000..684d44fe58 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/088_StarcoinVerifier.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/089_String.mv b/vm/stdlib/compiled/latest/stdlib/089_String.mv new file mode 100644 index 0000000000..4b51f43735 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/089_String.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/090_Table.mv b/vm/stdlib/compiled/latest/stdlib/090_Table.mv new file mode 100644 index 0000000000..297bb011c4 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/090_Table.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/091_TransactionManager.mv b/vm/stdlib/compiled/latest/stdlib/091_TransactionManager.mv deleted file mode 100644 index 178931119d..0000000000 Binary files a/vm/stdlib/compiled/latest/stdlib/091_TransactionManager.mv and /dev/null differ diff --git a/vm/stdlib/compiled/latest/stdlib/091_TransactionTimeout.mv b/vm/stdlib/compiled/latest/stdlib/091_TransactionTimeout.mv new file mode 100644 index 0000000000..0e53dcbf46 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/091_TransactionTimeout.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/092_TransactionManager.mv b/vm/stdlib/compiled/latest/stdlib/092_TransactionManager.mv new file mode 100644 index 0000000000..799c306a8a Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/092_TransactionManager.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/093_TreasuryScripts.mv b/vm/stdlib/compiled/latest/stdlib/093_TreasuryScripts.mv new file mode 100644 index 0000000000..23b7501a57 Binary files /dev/null and 
b/vm/stdlib/compiled/latest/stdlib/093_TreasuryScripts.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/094_U256.mv b/vm/stdlib/compiled/latest/stdlib/094_U256.mv new file mode 100644 index 0000000000..5114e77f7f Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/094_U256.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/095_YieldFarming.mv b/vm/stdlib/compiled/latest/stdlib/095_YieldFarming.mv new file mode 100644 index 0000000000..5095009085 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/095_YieldFarming.mv differ diff --git a/vm/stdlib/compiled/latest/stdlib/096_YieldFarmingV2.mv b/vm/stdlib/compiled/latest/stdlib/096_YieldFarmingV2.mv new file mode 100644 index 0000000000..e0e4f01ae5 Binary files /dev/null and b/vm/stdlib/compiled/latest/stdlib/096_YieldFarmingV2.mv differ diff --git a/vm/stdlib/tests/package_init_script.rs b/vm/stdlib/tests/package_init_script.rs index 3021071ca3..8c914f23b9 100644 --- a/vm/stdlib/tests/package_init_script.rs +++ b/vm/stdlib/tests/package_init_script.rs @@ -16,6 +16,7 @@ fn test_package_init_function() -> Result<()> { "./compiled/10/9-10/stdlib.blob", "./compiled/11/10-11/stdlib.blob", "./compiled/12/11-12/stdlib.blob", + "./compiled/13/12-13/stdlib.blob", ]; let init_strs = [ @@ -30,8 +31,9 @@ fn test_package_init_function() -> Result<()> { "", "", "0x00000000000000000000000000000001::StdlibUpgradeScripts::upgrade_from_v11_to_v12", + "0x00000000000000000000000000000001::StdlibUpgradeScripts::upgrade_from_v12_to_v13", ]; - for (i, version) in (2..=12).collect::<Vec<_>>().into_iter().enumerate() { + for (i, version) in (2..=13).collect::<Vec<_>>().into_iter().enumerate() { let package_file = format!("{}/{}-{}/stdlib.blob", version, version - 1, version); let package = COMPILED_MOVE_CODE_DIR .get_file(package_file) diff --git a/vm/types/src/account_config/constants/chain.rs b/vm/types/src/account_config/constants/chain.rs index fe0442c13d..4183b2956a 100644 --- a/vm/types/src/account_config/constants/chain.rs +++ b/vm/types/src/account_config/constants/chain.rs @@ -37,6 +37,8 @@ pub static G_TRANSACTION_MANAGER_MODULE: Lazy<ModuleId> = Lazy::new(|| { pub static G_PROLOGUE_NAME: Lazy<Identifier> = Lazy::new(|| Identifier::new("prologue").unwrap()); pub static G_BLOCK_PROLOGUE_NAME: Lazy<Identifier> = Lazy::new(|| Identifier::new("block_prologue").unwrap()); +pub static G_BLOCK_PROLOGUE_V2_NAME: Lazy<Identifier> = + Lazy::new(|| Identifier::new("block_prologue_v2").unwrap()); pub static G_EPILOGUE_NAME: Lazy<Identifier> = Lazy::new(|| Identifier::new("epilogue").unwrap()); pub static G_EPILOGUE_V2_NAME: Lazy<Identifier> = Lazy::new(|| Identifier::new("epilogue_v2").unwrap());
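The `chain.rs` hunk only registers the new `block_prologue_v2` entry-point name; the version gate that chooses between the two identifiers lives in the `starcoin_vm.rs` hunks further down. As a minimal sketch of that selection, using local stand-ins for the `account_config` statics and a hypothetical `prologue_name` helper (the hard-coded 13 mirrors the `FLEXI_DAG_UPGRADE_VERSION_MARK` constant introduced below):

    use move_core_types::identifier::Identifier;
    use once_cell::sync::Lazy;

    // Illustrative stand-ins for the statics in account_config/constants/chain.rs.
    static BLOCK_PROLOGUE: Lazy<Identifier> =
        Lazy::new(|| Identifier::new("block_prologue").unwrap());
    static BLOCK_PROLOGUE_V2: Lazy<Identifier> =
        Lazy::new(|| Identifier::new("block_prologue_v2").unwrap());

    /// Hypothetical helper: pick the block-prologue entry point by stdlib version.
    fn prologue_name(stdlib_version: u64) -> &'static Identifier {
        if stdlib_version >= 13 {
            &BLOCK_PROLOGUE_V2
        } else {
            &BLOCK_PROLOGUE
        }
    }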
diff --git a/vm/types/src/block_metadata/legacy.rs b/vm/types/src/block_metadata/legacy.rs new file mode 100644 index 0000000000..2c4d1f1e71 --- /dev/null +++ b/vm/types/src/block_metadata/legacy.rs @@ -0,0 +1,102 @@ +use crate::genesis_config::ChainId; +use crate::transaction::authenticator::AuthenticationKey; +use anyhow::anyhow; +use move_core_types::account_address::AccountAddress; +use serde::{Deserialize, Deserializer, Serialize}; +use starcoin_crypto::hash::{CryptoHash, CryptoHasher, PlainCryptoHash}; +use starcoin_crypto::HashValue; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, CryptoHasher, CryptoHash)] +pub struct BlockMetadata { + #[serde(skip)] + pub(super) id: Option<HashValue>, + /// Parent block hash. + pub(super) parent_hash: HashValue, + pub(super) timestamp: u64, + pub(super) author: AccountAddress, + pub(super) author_auth_key: Option<AuthenticationKey>, + pub(super) uncles: u64, + pub(super) number: u64, + pub(super) chain_id: ChainId, + pub(super) parent_gas_used: u64, +} + +impl BlockMetadata { + pub fn id(&self) -> HashValue { + self.id.expect("id must be initialized") + } +} + +impl<'de> Deserialize<'de> for BlockMetadata { + fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error> + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(rename = "BlockMetadata")] + struct BlockMetadataData { + parent_hash: HashValue, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option<AuthenticationKey>, + uncles: u64, + number: u64, + chain_id: ChainId, + parent_gas_used: u64, + } + let data = BlockMetadataData::deserialize(deserializer)?; + let mut txn = Self { + id: None, + parent_hash: data.parent_hash, + timestamp: data.timestamp, + author: data.author, + author_auth_key: data.author_auth_key, + uncles: data.uncles, + number: data.number, + chain_id: data.chain_id, + parent_gas_used: data.parent_gas_used, + }; + txn.id = Some(txn.crypto_hash()); + Ok(txn) + } +} + +impl From<BlockMetadata> for super::BlockMetadata { + fn from(value: BlockMetadata) -> Self { + Self { + id: value.id, + parent_hash: value.parent_hash, + timestamp: value.timestamp, + author: value.author, + author_auth_key: value.author_auth_key, + uncles: value.uncles, + number: value.number, + chain_id: value.chain_id, + parent_gas_used: value.parent_gas_used, + parents_hash: None, + } + } +} + +impl TryFrom<super::BlockMetadata> for BlockMetadata { + type Error = anyhow::Error; + + fn try_from(value: super::BlockMetadata) -> Result<Self, Self::Error> { + if value.parents_hash.is_some() { + return Err(anyhow!( + "Can't convert a new BlockMetadata txn with parents_hash to an old one" + )); + } + Ok(Self { + id: value.id, + parent_hash: value.parent_hash, + timestamp: value.timestamp, + author: value.author, + author_auth_key: value.author_auth_key, + uncles: value.uncles, + number: value.number, + chain_id: value.chain_id, + parent_gas_used: value.parent_gas_used, + }) + } +} diff --git a/vm/types/src/block_metadata.rs b/vm/types/src/block_metadata/mod.rs similarity index 71% rename from vm/types/src/block_metadata.rs rename to vm/types/src/block_metadata/mod.rs index 0064ddd9e3..b8a670db9b 100644 --- a/vm/types/src/block_metadata.rs +++ b/vm/types/src/block_metadata/mod.rs @@ -4,11 +4,14 @@ // Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 +mod legacy; + use crate::account_address::AccountAddress; use crate::account_config::genesis_address; use crate::genesis_config::ChainId; use crate::transaction::authenticator::AuthenticationKey; use bcs_ext::Sample; +pub use legacy::BlockMetadata as LegacyBlockMetadata; use serde::{Deserialize, Deserializer, Serialize}; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::{ @@ -41,6 +44,7 @@ pub struct BlockMetadata { number: u64, chain_id: ChainId, parent_gas_used: u64, + parents_hash: Option<Vec<HashValue>>, } impl BlockMetadata { @@ -53,6 +57,32 @@ impl BlockMetadata { number: u64, chain_id: ChainId, parent_gas_used: u64, + ) -> Self { + let mut txn = legacy::BlockMetadata { + id: None, + parent_hash, + timestamp, + author, + author_auth_key, + uncles, + number, + chain_id, + parent_gas_used, + }; + txn.id = Some(txn.crypto_hash()); + txn.into() + } + + pub fn new_with_parents( + parent_hash: HashValue, + timestamp: u64, + author: AccountAddress, + author_auth_key: Option<AuthenticationKey>, + uncles: u64, + number: u64, + chain_id: ChainId, + parent_gas_used: u64, + parents_hash: Vec<HashValue>, ) -> Self { let mut txn = Self { id: None, @@ -64,6 +94,7 @@ impl BlockMetadata { number, chain_id, parent_gas_used, + parents_hash: Some(parents_hash), }; txn.id = Some(txn.crypto_hash()); txn @@ -80,6 +111,7 @@ impl BlockMetadata { u64, ChainId, u64, + Option<Vec<HashValue>>, ) { ( self.parent_hash, @@ -90,6 +122,7 @@ impl BlockMetadata { self.number, self.chain_id, self.parent_gas_used, + self.parents_hash, ) } @@ -135,24 +168,39 @@ impl<'de> Deserialize<'de> for BlockMetadata { number: u64, chain_id: ChainId, parent_gas_used: u64, + parents_hash: Option<Vec<HashValue>>, } let data = BlockMetadataData::deserialize(deserializer)?; - Ok(Self::new( - data.parent_hash, - data.timestamp, - data.author, - data.author_auth_key, - data.uncles, - data.number, - data.chain_id, - data.parent_gas_used, - )) + Ok(if let Some(parents_hash) = data.parents_hash { + Self::new_with_parents( + data.parent_hash, + data.timestamp, + data.author, + data.author_auth_key, + data.uncles, + data.number, + data.chain_id, + data.parent_gas_used, + parents_hash, + ) + } else { + Self::new( + data.parent_hash, + data.timestamp, + data.author, + data.author_auth_key, + data.uncles, + data.number, + data.chain_id, + data.parent_gas_used, + ) + }) } } impl Sample for BlockMetadata { fn sample() -> Self { - Self::new( + Self::new_with_parents( HashValue::zero(), 0, genesis_address(), @@ -161,6 +209,7 @@ 0, ChainId::test(), 0, + vec![], ) } }
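Both constructors still compute `id` eagerly, but `new` deliberately hashes through the legacy layout and then upgrades via `From`, so pre-upgrade metadata transactions keep their historical hashes, while `new_with_parents` hashes the extended layout and can no longer be downgraded. A sketch of both behaviors with illustrative values (`demo` is a hypothetical function, not part of the patch):

    use starcoin_crypto::HashValue;
    use starcoin_vm_types::account_config::genesis_address;
    use starcoin_vm_types::block_metadata::{BlockMetadata, LegacyBlockMetadata};
    use starcoin_vm_types::genesis_config::ChainId;
    use std::convert::TryFrom;

    fn demo() -> anyhow::Result<()> {
        // Pre-DAG form: parents_hash stays None, so the txn still
        // round-trips into the legacy representation.
        let old_style = BlockMetadata::new(
            HashValue::zero(), // parent_hash
            0,                 // timestamp (ms)
            genesis_address(), // author
            None,              // author_auth_key
            0,                 // uncles
            1,                 // number
            ChainId::test(),
            0,                 // parent_gas_used
        );
        let _legacy = LegacyBlockMetadata::try_from(old_style)?;

        // FlexiDag form: carries parent hashes and can no longer be downgraded.
        let dag_style = BlockMetadata::new_with_parents(
            HashValue::zero(), 0, genesis_address(), None, 0, 1, ChainId::test(), 0,
            vec![HashValue::zero()],
        );
        assert!(LegacyBlockMetadata::try_from(dag_style).is_err());
        Ok(())
    }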
diff --git a/vm/types/src/on_chain_config/flexi_dag_config.rs b/vm/types/src/on_chain_config/flexi_dag_config.rs new file mode 100644 index 0000000000..0ab18b0d0a --- /dev/null +++ b/vm/types/src/on_chain_config/flexi_dag_config.rs @@ -0,0 +1,31 @@ +// Copyright (c) The Starcoin Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::on_chain_config::OnChainConfig; +use move_core_types::identifier::Identifier; +use move_core_types::language_storage::{StructTag, TypeTag, CORE_CODE_ADDRESS}; +use serde::{Deserialize, Serialize}; + +const MV_FLEXI_DAG_CONFIG_MODULE_NAME: &str = "FlexiDagConfig"; +const MV_FLEXI_DAG_CONFIG_STRUCT_NAME: &str = "FlexiDagConfig"; + +#[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, Hash, PartialEq, PartialOrd, Ord)] +pub struct FlexiDagConfig { + pub effective_height: u64, +} + +impl OnChainConfig for FlexiDagConfig { + const MODULE_IDENTIFIER: &'static str = MV_FLEXI_DAG_CONFIG_MODULE_NAME; + const CONF_IDENTIFIER: &'static str = MV_FLEXI_DAG_CONFIG_STRUCT_NAME; +} + +impl FlexiDagConfig { + pub fn type_tag() -> TypeTag { + TypeTag::Struct(Box::new(StructTag { + address: CORE_CODE_ADDRESS, + module: Identifier::new(MV_FLEXI_DAG_CONFIG_MODULE_NAME).unwrap(), + name: Identifier::new(MV_FLEXI_DAG_CONFIG_STRUCT_NAME).unwrap(), + type_params: vec![], + })) + } +} diff --git a/vm/types/src/on_chain_config/mod.rs b/vm/types/src/on_chain_config/mod.rs index 774525f3b2..334b23a901 100644 --- a/vm/types/src/on_chain_config/mod.rs +++ b/vm/types/src/on_chain_config/mod.rs @@ -18,6 +18,7 @@ use std::{collections::HashMap, sync::Arc}; mod consensus_config; mod dao_config; +mod flexi_dag_config; mod gas_schedule; mod genesis_gas_schedule; mod move_lang_version; @@ -27,6 +28,7 @@ mod vm_config; pub use self::{ consensus_config::{consensus_config_type_tag, ConsensusConfig, G_CONSENSUS_CONFIG_IDENTIFIER}, dao_config::DaoConfig, + flexi_dag_config::*, gas_schedule::{ instruction_gas_schedule_v1, instruction_gas_schedule_v2, native_gas_schedule_v1, native_gas_schedule_v2, native_gas_schedule_v3, native_gas_schedule_v4,
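`FlexiDagConfig` is an ordinary on-chain config keyed at `0x1::FlexiDagConfig::FlexiDagConfig`, so it can be fetched like the other configs. A hedged sketch of the gate a node might derive from it, assuming `fetch_config` accepts a `StateView`-style reader as in the `starcoin_vm.rs` hunk below, and assuming `effective_height` marks the block number at which DAG rules take effect (`is_dag_effective` is a hypothetical helper):

    use anyhow::Result;
    use starcoin_vm_types::on_chain_config::{FlexiDagConfig, OnChainConfig};
    use starcoin_vm_types::state_view::StateView;

    /// Hypothetical gate: DAG semantics apply once the chain reaches the
    /// configured effective height; a missing config means "not yet effective".
    fn is_dag_effective<S: StateView>(state_view: &S, block_number: u64) -> Result<bool> {
        Ok(FlexiDagConfig::fetch_config(state_view)?
            .map(|config| block_number >= config.effective_height)
            .unwrap_or(false))
    }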
diff --git a/vm/types/src/on_chain_resource/block_metadata.rs b/vm/types/src/on_chain_resource/block_metadata.rs index 69bd01c3d7..f35934e143 100644 --- a/vm/types/src/on_chain_resource/block_metadata.rs +++ b/vm/types/src/on_chain_resource/block_metadata.rs @@ -25,3 +25,29 @@ impl MoveResource for BlockMetadata { const MODULE_NAME: &'static str = "Block"; const STRUCT_NAME: &'static str = "BlockMetadata"; } + +/// On-chain resource BlockMetadata mapping for FlexiDag blocks +#[derive(Debug, Serialize, Deserialize)] +pub struct BlockMetadataV2 { + // number of the current block + pub number: u64, + // Hash of the parent block. + pub parent_hash: HashValue, + // Author of the current block. + pub author: AccountAddress, + pub uncles: u64, + pub parents_hash: Vec<u8>, + // Handle where events with the time of new blocks are emitted + pub new_block_events: EventHandle, +} + +impl BlockMetadataV2 { + pub fn parents_hash(&self) -> anyhow::Result<Vec<HashValue>> { + bcs_ext::from_bytes(self.parents_hash.as_slice()) + } +} + +impl MoveResource for BlockMetadataV2 { + const MODULE_NAME: &'static str = "Block"; + const STRUCT_NAME: &'static str = "BlockMetadataV2"; +} diff --git a/vm/types/src/on_chain_resource/mod.rs b/vm/types/src/on_chain_resource/mod.rs index a537109a76..2bbca7e469 100644 --- a/vm/types/src/on_chain_resource/mod.rs +++ b/vm/types/src/on_chain_resource/mod.rs @@ -8,7 +8,7 @@ mod global_time; pub mod nft; mod treasury; -pub use block_metadata::BlockMetadata; +pub use block_metadata::{BlockMetadata, BlockMetadataV2}; pub use epoch::{Epoch, EpochData, EpochInfo}; pub use global_time::GlobalTimeOnChain; pub use treasury::{LinearWithdrawCapability, Treasury}; diff --git a/vm/types/src/state_view.rs b/vm/types/src/state_view.rs index 09ee20f4e5..64a1d784f7 100644 --- a/vm/types/src/state_view.rs +++ b/vm/types/src/state_view.rs @@ -20,7 +20,7 @@ use crate::{ on_chain_config::{GlobalTimeOnChain, OnChainConfig}, on_chain_resource::{ dao::{Proposal, ProposalAction}, - BlockMetadata, Epoch, EpochData, EpochInfo, Treasury, + BlockMetadata, BlockMetadataV2, Epoch, EpochData, EpochInfo, Treasury, }, sips::SIP, }; @@ -167,6 +167,11 @@ pub trait StateReaderExt: StateView { .ok_or_else(|| format_err!("BlockMetadata resource should exist at genesis address.")) } + + // Get the latest BlockMetadataV2 on chain, available since stdlib version 13 + fn get_block_metadata_v2(&self) -> Result<Option<BlockMetadataV2>> { + self.get_resource::<BlockMetadataV2>(genesis_address()) + } + fn get_code(&self, module_id: ModuleId) -> Result<Option<Vec<u8>>> { self.get_state_value(&StateKey::AccessPath(AccessPath::from(&module_id))) } diff --git a/vm/types/src/transaction/mod.rs b/vm/types/src/transaction/mod.rs index 5a083a80ec..b0ce1a0900 100644 --- a/vm/types/src/transaction/mod.rs +++ b/vm/types/src/transaction/mod.rs @@ -884,6 +884,32 @@ pub enum Transaction { BlockMetadata(BlockMetadata), } +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename = "Transaction")] +pub enum LegacyTransaction { + UserTransaction(SignedUserTransaction), + BlockMetadata(#[serde(rename = "BlockMetadata")] super::block_metadata::LegacyBlockMetadata), +} + +impl LegacyTransaction { + pub fn id(&self) -> HashValue { + match self { + Self::UserTransaction(signed) => signed.id(), + Self::BlockMetadata(meta) => meta.id(), + } + } +} + +impl From<LegacyTransaction> for Transaction { + fn from(value: LegacyTransaction) -> Self { + match value { + LegacyTransaction::UserTransaction(txn) => Self::UserTransaction(txn), + LegacyTransaction::BlockMetadata(meta) => Self::BlockMetadata(meta.into()), + } + } +} + impl Transaction { pub fn as_signed_user_txn(&self) -> Result<&SignedUserTransaction> { match self {
diff --git a/vm/vm-runtime/src/starcoin_vm.rs b/vm/vm-runtime/src/starcoin_vm.rs index c9402e5f51..6d30541410 100644 --- a/vm/vm-runtime/src/starcoin_vm.rs +++ b/vm/vm-runtime/src/starcoin_vm.rs @@ -12,6 +12,7 @@ use crate::errors::{ use crate::move_vm_ext::{MoveResolverExt, MoveVmExt, SessionId, SessionOutput}; use anyhow::{bail, format_err, Error, Result}; use move_core_types::gas_algebra::{InternalGasPerByte, NumBytes}; +use move_core_types::vm_status::StatusCode::VALUE_SERIALIZATION_ERROR; use move_table_extension::NativeTableContext; use move_vm_runtime::move_vm_adapter::{PublishModuleBundleOption, SessionAdapter}; use move_vm_runtime::session::Session; @@ -50,7 +51,7 @@ use starcoin_vm_types::genesis_config::StdlibVersion; use starcoin_vm_types::identifier::IdentStr; use starcoin_vm_types::language_storage::ModuleId; use starcoin_vm_types::on_chain_config::{ - GasSchedule, MoveLanguageVersion, G_GAS_CONSTANTS_IDENTIFIER, + FlexiDagConfig, GasSchedule, MoveLanguageVersion, G_GAS_CONSTANTS_IDENTIFIER, G_INSTRUCTION_SCHEDULE_IDENTIFIER, G_NATIVE_SCHEDULE_IDENTIFIER, G_VM_CONFIG_IDENTIFIER, }; use starcoin_vm_types::state_store::state_key::StateKey; @@ -87,6 +88,7 @@ pub struct StarcoinVM { native_params: NativeGasParameters, gas_params: Option<StarcoinGasParameters>, gas_schedule: Option<GasSchedule>, + flexi_dag_config: Option<FlexiDagConfig>, #[cfg(feature = "metrics")] metrics: Option<VMMetrics>, } @@ -94,6 +96,7 @@ /// marking of stdlib version which includes vmconfig upgrades. const VMCONFIG_UPGRADE_VERSION_MARK: u64 = 10; const GAS_SCHEDULE_UPGRADE_VERSION_MARK: u64 = 12; +const FLEXI_DAG_UPGRADE_VERSION_MARK: u64 = 13; impl StarcoinVM { #[cfg(feature = "metrics")] @@ -110,6 +113,7 @@ native_params, gas_params: Some(gas_params), gas_schedule: None, + flexi_dag_config: None, metrics, } } @@ -127,6 +131,7 @@ native_params, gas_params: Some(gas_params), gas_schedule: None, + flexi_dag_config: None, } } @@ -271,6 +276,13 @@ let gas_schedule = GasSchedule::fetch_config(&remote_storage)?; (gas_schedule, "gas schedule from GasSchedule") }; + if stdlib_version >= StdlibVersion::Version(FLEXI_DAG_UPGRADE_VERSION_MARK) { + self.flexi_dag_config = FlexiDagConfig::fetch_config(&remote_storage)?; + debug!( + "stdlib version: {}, fetch flexi_dag_config {:?} from FlexiDagConfig module", + stdlib_version, self.flexi_dag_config, + ); + } #[cfg(feature = "print_gas_info")] match self.gas_schedule.as_ref() { None => { @@ -284,6 +296,11 @@ Ok(()) } + pub fn get_flexidag_config(&self) -> Result<FlexiDagConfig, VMStatus> { + self.flexi_dag_config + .ok_or(VMStatus::Error(StatusCode::VM_STARTUP_FAILURE)) + } + pub fn get_gas_schedule(&self) -> Result<&CostTable, VMStatus> { self.vm_config .as_ref() @@ -516,11 +533,15 @@ package_address: AccountAddress, ) -> Result { let chain_id = remote_cache.get_chain_id()?; - let block_meta = remote_cache.get_block_metadata()?; + let block_number = if let Some(v2) = remote_cache.get_block_metadata_v2()? { + v2.number + } else { + remote_cache.get_block_metadata()?.number + }; // from mainnet after 8015088 and barnard after 8311392, we disable enforce upgrade if package_address == genesis_address() - || (chain_id.is_main() && block_meta.number < 8015088) - || (chain_id.is_barnard() && block_meta.number < 8311392) + || (chain_id.is_main() && block_number < 8015088) + || (chain_id.is_barnard() && block_number < 8311392) { let two_phase_upgrade_v2_path = access_path_for_two_phase_upgrade_v2(package_address); if let Some(data) = @@ -854,6 +875,7 @@ ) -> Result { #[cfg(testing)] info!("process_block_meta begin"); + let stdlib_version = self.version.clone().map(|v| v.into_stdlib_version()); let txn_sender = account_config::genesis_address(); // always use 0 gas for system. let max_gas_amount: Gas = 0.into(); @@ -871,8 +893,10 @@ number, chain_id, parent_gas_used, + parents_hash, ) = block_metadata.into_inner(); - let args = serialize_values(&vec![ + let mut function_name = &account_config::G_BLOCK_PROLOGUE_NAME; + let mut args_vec = vec![ MoveValue::Signer(txn_sender), MoveValue::vector_u8(parent_id.to_vec()), MoveValue::U64(timestamp), @@ -885,13 +909,23 @@ MoveValue::U64(number), MoveValue::U8(chain_id.id()), MoveValue::U64(parent_gas_used), - ]); + ]; + if let Some(version) = stdlib_version { + if version >= StdlibVersion::Version(FLEXI_DAG_UPGRADE_VERSION_MARK) { + args_vec.push(MoveValue::vector_u8( + bcs_ext::to_bytes(&parents_hash.unwrap_or_default()) + .or(Err(VMStatus::Error(VALUE_SERIALIZATION_ERROR)))?, + )); + function_name = &account_config::G_BLOCK_PROLOGUE_V2_NAME; + } + } + let args = serialize_values(&args_vec); let mut session: SessionAdapter<_> = self.move_vm.new_session(storage, session_id).into(); session .as_mut() .execute_function_bypass_visibility( &account_config::G_TRANSACTION_MANAGER_MODULE, - &account_config::G_BLOCK_PROLOGUE_NAME, + function_name, vec![], args, &mut gas_meter,