diff --git a/Cargo.lock b/Cargo.lock
index 535327bd7d..cfd552cd93 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -377,6 +377,16 @@ version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21"
 
+[[package]]
+name = "async-attributes"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5"
+dependencies = [
+ "quote 1.0.28",
+ "syn 1.0.107",
+]
+
 [[package]]
 name = "async-channel"
 version = "1.8.0"
@@ -428,6 +438,7 @@ dependencies = [
  "blocking",
  "futures-lite",
  "once_cell",
+ "tokio",
 ]
 
 [[package]]
@@ -466,6 +477,7 @@ version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d"
 dependencies = [
+ "async-attributes",
  "async-channel",
  "async-global-executor",
  "async-io",
@@ -2303,7 +2315,7 @@ checksum = "850878694b7933ca4c9569d30a34b55031b9b139ee1fc7b94a527c4ef960d690"
 
 [[package]]
 name = "diem-crypto"
 version = "0.0.3"
-source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00"
+source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea"
 dependencies = [
  "aes-gcm 0.8.0",
  "anyhow",
@@ -2338,7 +2350,7 @@ dependencies = [
 
 [[package]]
 name = "diem-crypto-derive"
 version = "0.0.3"
-source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00"
+source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea"
 dependencies = [
  "proc-macro2 1.0.59",
  "quote 1.0.28",
@@ -9253,6 +9265,7 @@ name = "starcoin-chain"
 version = "1.13.7"
 dependencies = [
  "anyhow",
+ "async-std",
  "bcs-ext",
  "clap 3.2.23",
  "proptest",
@@ -9270,6 +9283,7 @@ dependencies = [
  "starcoin-executor",
  "starcoin-genesis",
  "starcoin-logger",
+ "starcoin-network-rpc-api",
  "starcoin-open-block",
  "starcoin-resource-viewer",
  "starcoin-service-registry",
@@ -9299,7 +9313,9 @@ dependencies = [
  "rand_core 0.6.4",
  "serde 1.0.152",
  "starcoin-accumulator",
+ "starcoin-config",
  "starcoin-crypto",
+ "starcoin-network-rpc-api",
  "starcoin-service-registry",
  "starcoin-state-api",
  "starcoin-statedb",
@@ -9356,16 +9372,21 @@ name = "starcoin-chain-service"
 version = "1.13.7"
 dependencies = [
  "anyhow",
+ "async-std",
  "async-trait",
  "futures 0.3.26",
  "rand 0.8.5",
  "rand_core 0.6.4",
  "serde 1.0.152",
+ "starcoin-accumulator",
  "starcoin-chain",
  "starcoin-chain-api",
  "starcoin-config",
+ "starcoin-consensus",
  "starcoin-crypto",
+ "starcoin-flexidag",
  "starcoin-logger",
+ "starcoin-network-rpc-api",
  "starcoin-service-registry",
  "starcoin-state-api",
  "starcoin-storage",
@@ -9481,32 +9502,42 @@ name = "starcoin-consensus"
 version = "1.13.7"
 dependencies = [
  "anyhow",
+ "bcs-ext",
+ "bincode",
  "byteorder",
  "cryptonight-rs",
  "futures 0.3.26",
  "hex",
+ "itertools",
  "once_cell",
+ "parking_lot 0.12.1",
  "proptest",
  "proptest-derive",
  "rand 0.8.5",
  "rand_core 0.6.4",
+ "rocksdb",
  "rust-argon2",
+ "serde 1.0.152",
  "sha3",
+ "starcoin-accumulator",
  "starcoin-chain-api",
+ "starcoin-config",
  "starcoin-crypto",
  "starcoin-logger",
  "starcoin-state-api",
+ "starcoin-storage",
  "starcoin-time-service",
  "starcoin-types",
"starcoin-vm-types", "stest", + "tempfile", "thiserror", ] [[package]] name = "starcoin-crypto" version = "1.10.0-rc.2" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "anyhow", "bcs", @@ -9525,7 +9556,7 @@ dependencies = [ [[package]] name = "starcoin-crypto-macro" version = "1.10.0-rc.2" -source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=a742ddc0674022800341182cbb4c3681807b2f00#a742ddc0674022800341182cbb4c3681807b2f00" +source = "git+https://github.com/starcoinorg/starcoin-crypto?rev=8d41c280a227594ca0a2b6ecba580643518274ea#8d41c280a227594ca0a2b6ecba580643518274ea" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", @@ -9666,6 +9697,25 @@ dependencies = [ "tokio-executor 0.2.0-alpha.6", ] +[[package]] +name = "starcoin-flexidag" +version = "1.13.7" +dependencies = [ + "anyhow", + "async-trait", + "futures 0.3.26", + "starcoin-accumulator", + "starcoin-config", + "starcoin-consensus", + "starcoin-crypto", + "starcoin-logger", + "starcoin-service-registry", + "starcoin-storage", + "starcoin-types", + "thiserror", + "tokio", +] + [[package]] name = "starcoin-framework" version = "11.0.0" @@ -9856,6 +9906,7 @@ name = "starcoin-miner" version = "1.13.7" dependencies = [ "anyhow", + "async-std", "bcs-ext", "futures 0.3.26", "futures-timer", @@ -9871,6 +9922,7 @@ dependencies = [ "starcoin-consensus", "starcoin-crypto", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-metrics", @@ -10178,6 +10230,7 @@ dependencies = [ "serde_json", "starcoin-account-api", "starcoin-account-service", + "starcoin-accumulator", "starcoin-block-relayer", "starcoin-chain-notify", "starcoin-chain-service", @@ -10717,6 +10770,7 @@ dependencies = [ "starcoin-consensus", "starcoin-crypto", "starcoin-executor", + "starcoin-flexidag", "starcoin-genesis", "starcoin-logger", "starcoin-metrics", @@ -10744,6 +10798,7 @@ dependencies = [ "sysinfo", "test-helper", "thiserror", + "timeout-join-handler", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index d8cee9cb41..c1e537d11b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,6 +111,7 @@ members = [ "cmd/miner_client/api", "cmd/db-exporter", "cmd/genesis-nft-miner", + "flexidag", ] default-members = [ @@ -217,6 +218,7 @@ default-members = [ "stratum", "cmd/miner_client/api", "cmd/db-exporter", + "flexidag", ] [profile.dev] @@ -246,7 +248,7 @@ api-limiter = { path = "commons/api-limiter" } arc-swap = "1.5.1" arrayref = "0.3" ascii = "1.0.0" -async-std = "1.12" +async-std = { version = "1.12", features = ["attributes", "tokio1"] } async-trait = "0.1.53" asynchronous-codec = "0.5" atomic-counter = "1.0.1" @@ -257,6 +259,9 @@ bcs-ext = { path = "commons/bcs_ext" } bech32 = "0.9" bencher = "0.1.5" bitflags = "1.3.2" +faster-hex = "0.6" +indexmap = "1.9.1" +bincode = { version = "1", default-features = false } bs58 = "0.3.1" byteorder = "1.3.4" bytes = "1" @@ -438,7 +443,7 @@ starcoin-chain-service = { path = "chain/service" } starcoin-cmd = { path = "cmd/starcoin" } starcoin-config = { path = "config" } starcoin-consensus = { path = "consensus" } -starcoin-crypto = { git = "https://github.com/starcoinorg/starcoin-crypto", rev = "a742ddc0674022800341182cbb4c3681807b2f00" } +starcoin-crypto = { git = "https://github.com/starcoinorg/starcoin-crypto", rev = 
"8d41c280a227594ca0a2b6ecba580643518274ea" } starcoin-decrypt = { path = "commons/decrypt" } starcoin-dev = { path = "vm/dev" } starcoin-executor = { path = "executor" } @@ -496,6 +501,7 @@ starcoin-parallel-executor = { path = "vm/parallel-executor" } starcoin-transaction-benchmarks = { path = "vm/transaction-benchmarks" } starcoin-language-e2e-tests = { path = "vm/e2e-tests" } starcoin-proptest-helpers = { path = "vm/proptest-helpers" } +starcoin-flexidag = { path = "flexidag" } syn = { version = "1.0.107", features = [ "full", diff --git a/account/src/account_test.rs b/account/src/account_test.rs index bba50ab6cb..5e36ea2528 100644 --- a/account/src/account_test.rs +++ b/account/src/account_test.rs @@ -224,7 +224,7 @@ pub fn test_wallet_account() -> Result<()> { ); //println!("verify result is {:?}", sign.verify(&raw_txn, &public_key)?); println!("public key is {:?}", public_key.to_bytes().as_ref()); - println!("hash value is {:?}", hash_value.as_ref()); + println!("hash value is {:?}", hash_value); println!("key is {:?}", key.derived_address()); println!("address is {:?},result is {:?}", address, result); diff --git a/benchmarks/src/chain.rs b/benchmarks/src/chain.rs index ede8471734..da52a5043d 100644 --- a/benchmarks/src/chain.rs +++ b/benchmarks/src/chain.rs @@ -46,8 +46,14 @@ impl ChainBencher { Genesis::init_and_check_storage(&net, storage.clone(), temp_path.path()) .expect("init storage by genesis fail."); - let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None) - .expect("create block chain should success."); + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + net.id().clone(), + None, + ) + .expect("create block chain should success."); let miner_account = AccountInfo::random(); ChainBencher { diff --git a/block-relayer/src/block_relayer.rs b/block-relayer/src/block_relayer.rs index d8d791051c..33baf7b34d 100644 --- a/block-relayer/src/block_relayer.rs +++ b/block-relayer/src/block_relayer.rs @@ -78,6 +78,7 @@ impl BlockRelayer { &self, network: NetworkServiceRef, executed_block: Arc, + tips_hash: Option>, ) { if !self.is_nearly_synced() { debug!("[block-relay] Ignore NewHeadBlock event because the node has not been synchronized yet."); @@ -85,7 +86,7 @@ impl BlockRelayer { } let compact_block = executed_block.block().clone().into(); let compact_block_msg = - CompactBlockMessage::new(compact_block, executed_block.block_info.clone()); + CompactBlockMessage::new(compact_block, executed_block.block_info.clone(), tips_hash); network.broadcast(NotificationMessage::CompactBlock(Box::new( compact_block_msg, ))); @@ -203,7 +204,9 @@ impl BlockRelayer { ctx: &mut ServiceContext, ) -> Result<()> { let network = ctx.get_shared::()?; - let block_connector_service = ctx.service_ref::()?.clone(); + let block_connector_service = ctx + .service_ref::>()? 
+            .clone();
         let txpool = self.txpool.clone();
         let metrics = self.metrics.clone();
         let fut = async move {
@@ -286,7 +289,7 @@
                 return;
             }
         };
-        self.broadcast_compact_block(network, event.0);
+        self.broadcast_compact_block(network, event.0, event.1);
     }
 }
 
@@ -303,7 +306,7 @@
                 return;
             }
         };
-        self.broadcast_compact_block(network, event.0);
+        self.broadcast_compact_block(network, event.0, event.1);
     }
 }
diff --git a/chain/Cargo.toml b/chain/Cargo.toml
index 44d63cc92c..5a01426439 100644
--- a/chain/Cargo.toml
+++ b/chain/Cargo.toml
@@ -23,6 +23,8 @@ starcoin-types = { package = "starcoin-types", workspace = true }
 starcoin-vm-types = { workspace = true }
 starcoin-storage = { workspace = true }
 thiserror = { workspace = true }
+starcoin-network-rpc-api = { workspace = true }
+async-std = { workspace = true }
 
 [dev-dependencies]
 proptest = { workspace = true }
@@ -39,6 +41,7 @@ stdlib = { workspace = true }
 stest = { workspace = true }
 test-helper = { workspace = true }
 tokio = { features = ["full"], workspace = true }
+starcoin-network-rpc-api = { workspace = true }
 
 [features]
 default = []
diff --git a/chain/api/Cargo.toml b/chain/api/Cargo.toml
index 6b6b855e79..094c6edcb8 100644
--- a/chain/api/Cargo.toml
+++ b/chain/api/Cargo.toml
@@ -15,7 +15,8 @@ starcoin-time-service = { workspace = true }
 starcoin-types = { workspace = true }
 starcoin-vm-types = { workspace = true }
 thiserror = { workspace = true }
-
+starcoin-network-rpc-api = { workspace = true }
+starcoin-config = { workspace = true }
 
 [dev-dependencies]
diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs
index 93884610e2..7943c64919 100644
--- a/chain/api/src/chain.rs
+++ b/chain/api/src/chain.rs
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2
 
 use anyhow::Result;
+use starcoin_accumulator::accumulator_info::AccumulatorInfo;
+use starcoin_config::ChainNetworkID;
 use starcoin_crypto::HashValue;
 use starcoin_state_api::ChainStateReader;
 use starcoin_statedb::ChainStateDB;
@@ -80,7 +82,11 @@
     /// Verify block header and body, base current chain, but do not verify it execute state.
     fn verify(&self, block: Block) -> Result<VerifiedBlock>;
     /// Execute block and verify it execute state, and save result base current chain, but do not change current chain.
-    fn execute(&self, block: VerifiedBlock) -> Result<ExecutedBlock>;
+    fn execute(
+        &self,
+        block: VerifiedBlock,
+        transaction_parent: Option<HashValue>,
+    ) -> Result<ExecutedBlock>;
     /// Get chain transaction infos
     fn get_transaction_infos(
         &self,
@@ -100,6 +106,8 @@
         event_index: Option<u64>,
         access_path: Option<AccessPath>,
     ) -> Result<Option<TransactionInfoWithProof>>;
+
+    fn net_id(&self) -> ChainNetworkID;
 }
 
 pub trait ChainWriter {
diff --git a/chain/api/src/errors.rs b/chain/api/src/errors.rs
index 777cb19e7c..0fccef901c 100644
--- a/chain/api/src/errors.rs
+++ b/chain/api/src/errors.rs
@@ -63,6 +63,10 @@ pub enum ConnectBlockError {
     VerifyBlockFailed(VerifyBlockField, Error),
     #[error("Barnard hard fork block: {:?} ", .0.header())]
     BarnardHardFork(Box<Block>),
+    #[error("dag block before time window: {:?} ", .0.header())]
+    DagBlockBeforeTimeWindow(Box<Block>),
+    #[error("dag block after time window: {:?} ", .0.header())]
+    DagBlockAfterTimeWindow(Box<Block>),
 }
 
 impl ConnectBlockError {
@@ -74,6 +78,10 @@ impl ConnectBlockError {
         ReputationChange::new_fatal("VerifyBlockFailed");
     pub const REP_BARNARD_HARD_FORK: ReputationChange =
         ReputationChange::new_fatal("BarnardHardFork");
+    pub const REP_BLOCK_BEFORE_TIME_WINDOW: ReputationChange =
+        ReputationChange::new_fatal("DagBlockBeforeTimeWindow");
+    pub const REP_BLOCK_AFTER_TIME_WINDOW: ReputationChange =
+        ReputationChange::new_fatal("DagBlockAfterTimeWindow");
 
     pub fn reason(&self) -> &str {
         match self {
@@ -81,6 +89,8 @@
             ConnectBlockError::ParentNotExist(_) => "ParentNotExist",
             ConnectBlockError::VerifyBlockFailed(_, _) => "VerifyBlockFailed",
             ConnectBlockError::BarnardHardFork(_) => "BarnardHardFork",
+            ConnectBlockError::DagBlockBeforeTimeWindow(_) => "DagBlockBeforeTimeWindow",
+            ConnectBlockError::DagBlockAfterTimeWindow(_) => "DagBlockAfterTimeWindow",
         }
     }
 
@@ -92,6 +102,12 @@
                 ConnectBlockError::REP_VERIFY_BLOCK_FAILED
             }
             ConnectBlockError::BarnardHardFork(_) => ConnectBlockError::REP_BARNARD_HARD_FORK,
+            ConnectBlockError::DagBlockBeforeTimeWindow(_) => {
+                ConnectBlockError::REP_BLOCK_BEFORE_TIME_WINDOW
+            }
+            ConnectBlockError::DagBlockAfterTimeWindow(_) => {
+                ConnectBlockError::REP_BLOCK_AFTER_TIME_WINDOW
+            }
         }
     }
 }
diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs
index d4144fe9a0..d023481215 100644
--- a/chain/api/src/message.rs
+++ b/chain/api/src/message.rs
@@ -4,6 +4,9 @@
 use crate::TransactionInfoWithProof;
 use anyhow::Result;
 use starcoin_crypto::HashValue;
+use starcoin_network_rpc_api::dag_protocol::{
+    TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail,
+};
 use starcoin_service_registry::ServiceRequest;
 use starcoin_types::transaction::RichTransactionInfo;
 use starcoin_types::{
@@ -60,6 +63,14 @@
         event_index: Option<u64>,
         access_path: Option<AccessPath>,
     },
     GetBlockInfos(Vec<HashValue>),
+    GetDagAccumulatorLeaves {
+        start_index: u64,
+        batch_size: u64,
+    },
+    GetTargetDagAccumulatorLeafDetail {
+        leaf_index: u64,
+        batch_size: u64,
+    },
 }
 
 impl ServiceRequest for ChainRequest {
@@ -79,7 +90,7 @@ pub enum ChainResponse {
     Transaction(Box<Transaction>),
     TransactionOption(Option<Box<Transaction>>),
     BlockVec(Vec<Block>),
-    BlockOptionVec(Vec<Option<Block>>),
+    BlockOptionVec(Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>),
     BlockHeaderVec(Vec<Option<BlockHeader>>),
     TransactionInfos(Vec<RichTransactionInfo>),
     TransactionInfo(Option<RichTransactionInfo>),
@@ -88,4 +99,6 @@
     HashVec(Vec<HashValue>),
     TransactionProof(Box<Option<TransactionInfoWithProof>>),
     BlockInfoVec(Box<Vec<Option<BlockInfo>>>),
+    TargetDagAccumulatorLeaf(Vec<TargetDagAccumulatorLeaf>),
+    TargetDagAccumulatorLeafDetail(Vec<TargetDagAccumulatorLeafDetail>),
 }
diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs
index 8ba6adce0e..c2e3de8a2f 100644
--- a/chain/api/src/service.rs
+++ b/chain/api/src/service.rs
@@ -1,11 +1,17 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2
 
+use std::sync::{Arc, Mutex};
+
 use crate::message::{ChainRequest, ChainResponse};
 use crate::TransactionInfoWithProof;
 use anyhow::{bail, Result};
 use starcoin_crypto::HashValue;
-use starcoin_service_registry::{ActorService, ServiceHandler, ServiceRef};
+use starcoin_network_rpc_api::dag_protocol::{
+    self, GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail, TargetDagAccumulatorLeaf,
+    TargetDagAccumulatorLeafDetail,
+};
+use starcoin_service_registry::{ActorService, ServiceContext, ServiceHandler, ServiceRef};
 use starcoin_types::contract_event::{ContractEvent, ContractEventInfo};
 use starcoin_types::filter::Filter;
 use starcoin_types::startup_info::ChainStatus;
@@ -20,7 +26,10 @@ use starcoin_vm_types::access_path::AccessPath;
 pub trait ReadableChainService {
     fn get_header_by_hash(&self, hash: HashValue) -> Result<Option<BlockHeader>>;
     fn get_block_by_hash(&self, hash: HashValue) -> Result<Option<Block>>;
-    fn get_blocks(&self, ids: Vec<HashValue>) -> Result<Vec<Option<Block>>>;
+    fn get_blocks(
+        &self,
+        ids: Vec<HashValue>,
+    ) -> Result<Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>>;
     fn get_headers(&self, ids: Vec<HashValue>) -> Result<Vec<Option<BlockHeader>>>;
     fn get_block_info_by_hash(&self, hash: HashValue) -> Result<Option<BlockInfo>>;
     fn get_transaction(&self, hash: HashValue) -> Result<Option<Transaction>>;
@@ -72,6 +81,14 @@
     ) -> Result<Vec<ContractEventInfo>>;
 
     fn get_block_infos(&self, ids: Vec<HashValue>) -> Result<Vec<Option<BlockInfo>>>;
+    fn get_dag_accumulator_leaves(
+        &self,
+        req: GetDagAccumulatorLeaves,
+    ) -> anyhow::Result<Vec<TargetDagAccumulatorLeaf>>;
+    fn get_target_dag_accumulator_leaf_detail(
+        &self,
+        req: GetTargetDagAccumulatorLeafDetail,
+    ) -> anyhow::Result<Vec<TargetDagAccumulatorLeafDetail>>;
 }
 
 /// Writeable block chain service trait
#[async_trait::async_trait]
@@ -85,7 +102,10 @@ pub trait ChainAsyncService:
 {
     async fn get_header_by_hash(&self, hash: &HashValue) -> Result<Option<BlockHeader>>;
     async fn get_block_by_hash(&self, hash: HashValue) -> Result<Option<Block>>;
-    async fn get_blocks(&self, hashes: Vec<HashValue>) -> Result<Vec<Option<Block>>>;
+    async fn get_blocks(
+        &self,
+        hashes: Vec<HashValue>,
+    ) -> Result<Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>>;
     async fn get_headers(&self, hashes: Vec<HashValue>) -> Result<Vec<Option<BlockHeader>>>;
     async fn get_block_info_by_hash(&self, hash: &HashValue) -> Result<Option<BlockInfo>>;
     async fn get_block_info_by_number(&self, number: u64) -> Result<Option<BlockInfo>>;
@@ -139,6 +159,14 @@
     ) -> Result<Option<TransactionInfoWithProof>>;
 
     async fn get_block_infos(&self, hashes: Vec<HashValue>) -> Result<Vec<Option<BlockInfo>>>;
+    async fn get_dag_accumulator_leaves(
+        &self,
+        req: dag_protocol::GetDagAccumulatorLeaves,
+    ) -> Result<Vec<TargetDagAccumulatorLeaf>>;
+    async fn get_dag_accumulator_leaves_detail(
+        &self,
+        req: dag_protocol::GetTargetDagAccumulatorLeafDetail,
+    ) -> Result<Option<Vec<TargetDagAccumulatorLeafDetail>>>;
 }
 
 #[async_trait::async_trait]
@@ -170,7 +198,10 @@ where
         }
     }
 
-    async fn get_blocks(&self, hashes: Vec<HashValue>) -> Result<Vec<Option<Block>>> {
+    async fn get_blocks(
+        &self,
+        hashes: Vec<HashValue>,
+    ) -> Result<Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>> {
         if let ChainResponse::BlockOptionVec(blocks) =
             self.send(ChainRequest::GetBlocks(hashes)).await??
         {
@@ -180,6 +211,40 @@
         }
     }
 
+    async fn get_dag_accumulator_leaves(
+        &self,
+        req: dag_protocol::GetDagAccumulatorLeaves,
+    ) -> Result<Vec<TargetDagAccumulatorLeaf>> {
+        if let ChainResponse::TargetDagAccumulatorLeaf(leaves) = self
+            .send(ChainRequest::GetDagAccumulatorLeaves {
+                start_index: req.accumulator_leaf_index,
+                batch_size: req.batch_size,
+            })
+            .await??
+        {
+            Ok(leaves)
+        } else {
+            bail!("get_dag_accumulator_leaves response type error.")
+        }
+    }
+
+    async fn get_dag_accumulator_leaves_detail(
+        &self,
+        req: dag_protocol::GetTargetDagAccumulatorLeafDetail,
+    ) -> Result<Option<Vec<TargetDagAccumulatorLeafDetail>>> {
+        if let ChainResponse::TargetDagAccumulatorLeafDetail(details) = self
+            .send(ChainRequest::GetTargetDagAccumulatorLeafDetail {
+                leaf_index: req.leaf_index,
+                batch_size: req.batch_size,
+            })
+            .await??
+        {
+            Ok(Some(details))
+        } else {
+            Ok(None)
+        }
+    }
+
     async fn get_headers(&self, ids: Vec<HashValue>) -> Result<Vec<Option<BlockHeader>>> {
         if let ChainResponse::BlockHeaderVec(headers) =
             self.send(ChainRequest::GetHeaders(ids)).await??
diff --git a/chain/chain-notify/src/lib.rs b/chain/chain-notify/src/lib.rs
index 60c1985dbe..de3f0900e5 100644
--- a/chain/chain-notify/src/lib.rs
+++ b/chain/chain-notify/src/lib.rs
@@ -52,11 +52,10 @@ impl EventHandler<Self, NewHeadBlock> for ChainNotifyHandlerService {
         item: NewHeadBlock,
         ctx: &mut ServiceContext<Self>,
     ) {
-        let NewHeadBlock(block_detail) = item;
+        let NewHeadBlock(block_detail, _tips_hash) = item;
         let block = block_detail.block();
         // notify header.
         self.notify_new_block(block, ctx);
-
         // notify events
         if let Err(e) = self.notify_events(block, self.store.clone(), ctx) {
             error!(target: "pubsub", "fail to notify events to client, err: {}", &e);
diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs
index 403cd09611..dbfe797320 100644
--- a/chain/mock/src/mock_chain.rs
+++ b/chain/mock/src/mock_chain.rs
@@ -25,7 +25,13 @@ impl MockChain {
         let (storage, chain_info, _) =
             Genesis::init_storage_for_test(&net).expect("init storage by genesis fail.");
 
-        let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)?;
+        let chain = BlockChain::new(
+            net.time_service(),
+            chain_info.head().id(),
+            storage,
+            net.id().clone(),
+            None,
+        )?;
         let miner = AccountInfo::random();
         Ok(Self::new_inner(net, chain, miner))
     }
@@ -36,7 +42,13 @@
         head_block_hash: HashValue,
         miner: AccountInfo,
     ) -> Result<Self> {
-        let chain = BlockChain::new(net.time_service(), head_block_hash, storage, None)?;
+        let chain = BlockChain::new(
+            net.time_service(),
+            head_block_hash,
+            storage,
+            net.id().clone(),
+            None,
+        )?;
         Ok(Self::new_inner(net, chain, miner))
     }
 
@@ -71,6 +83,7 @@
             self.head.time_service(),
             block_id,
             self.head.get_storage(),
+            self.net.id().clone(),
             None,
         )
     }
@@ -92,6 +105,7 @@
             self.net.time_service(),
             new_block_id,
             self.head.get_storage(),
+            self.net.id().clone(),
             None,
         )?;
         let branch_total_difficulty = branch.get_total_difficulty()?;
@@ -121,7 +135,7 @@
     }
 
     pub fn apply(&mut self, block: Block) -> Result<()> {
-        self.head.apply(block)?;
+        self.head.apply(block, None)?;
         Ok(())
     }
diff --git a/chain/open-block/src/lib.rs b/chain/open-block/src/lib.rs
index 7df7510ecd..52fb3800ab 100644
--- a/chain/open-block/src/lib.rs
+++ b/chain/open-block/src/lib.rs
@@ -39,6 +39,7 @@ pub struct OpenedBlock {
     difficulty: U256,
     strategy: ConsensusStrategy,
     vm_metrics: Option<VMMetrics>,
+    tips_header: Option<Vec<HashValue>>,
 }
 
 impl OpenedBlock {
@@ -52,6 +53,7 @@
         difficulty: U256,
         strategy: ConsensusStrategy,
         vm_metrics: Option<VMMetrics>,
+        tips_header: Option<Vec<HashValue>>,
     ) -> Result<Self> {
         let previous_block_id = previous_header.id();
         let block_info = storage
@@ -90,6 +92,7 @@
             difficulty,
             strategy,
             vm_metrics,
+            tips_header,
         };
         opened_block.initialize()?;
         Ok(opened_block)
@@ -284,6 +287,7 @@
             self.difficulty,
             self.strategy,
             self.block_meta,
+            self.tips_header,
         );
         Ok(block_template)
     }
diff --git a/chain/service/Cargo.toml b/chain/service/Cargo.toml
index e375203781..b772b9e707 100644
--- a/chain/service/Cargo.toml
+++ b/chain/service/Cargo.toml
@@ -1,5 +1,6 @@
 [dependencies]
 anyhow = { workspace = true }
+async-std = { workspace = true }
 async-trait = { workspace = true }
 futures = { workspace = true }
 rand = { workspace = true }
@@ -18,6 +19,10 @@ starcoin-vm-runtime = { workspace = true }
 starcoin-vm-types = { workspace = true }
 thiserror = { workspace = true }
 tokio = { workspace = true }
+starcoin-network-rpc-api = { workspace = true }
+starcoin-consensus = { workspace = true }
+starcoin-accumulator = { package = "starcoin-accumulator", workspace = true }
+starcoin-flexidag = { workspace = true }
 
 [dev-dependencies]
 stest = { workspace = true }
diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs
index f7b32799d1..4df9f0581e 100644
--- a/chain/service/src/chain_service.rs
+++ b/chain/service/src/chain_service.rs
@@ -1,21 +1,33 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use anyhow::{format_err, Error, Result};
+use anyhow::{bail, format_err, Error, Ok, Result};
+use starcoin_accumulator::node::AccumulatorStoreType;
+use starcoin_accumulator::{Accumulator, MerkleAccumulator};
 use starcoin_chain::BlockChain;
 use starcoin_chain_api::message::{ChainRequest, ChainResponse};
 use starcoin_chain_api::{
     ChainReader, ChainWriter, ReadableChainService, TransactionInfoWithProof,
 };
 use starcoin_config::NodeConfig;
+use starcoin_consensus::BlockDAG;
 use starcoin_crypto::HashValue;
+use starcoin_flexidag::flexidag_service::{
+    GetDagAccumulatorLeafDetail, GetDagBlockParents, UpdateDagTips,
+};
+use starcoin_flexidag::{flexidag_service, FlexidagService};
 use starcoin_logger::prelude::*;
+use starcoin_network_rpc_api::dag_protocol::{
+    GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail, TargetDagAccumulatorLeaf,
+    TargetDagAccumulatorLeafDetail,
+};
 use starcoin_service_registry::{
-    ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler,
+    ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, ServiceRef,
 };
 use starcoin_storage::{BlockStore, Storage, Store};
 use starcoin_types::block::ExecutedBlock;
 use starcoin_types::contract_event::ContractEventInfo;
+use starcoin_types::dag_block::KTotalDifficulty;
 use starcoin_types::filter::Filter;
 use starcoin_types::system_events::NewHeadBlock;
 use starcoin_types::transaction::RichTransactionInfo;
@@ -27,7 +39,7 @@
 };
 use starcoin_vm_runtime::metrics::VMMetrics;
 use starcoin_vm_types::access_path::AccessPath;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 
 /// A Chain reader service to provider Reader API.
 pub struct ChainReaderService {
@@ -39,10 +51,17 @@
         config: Arc<NodeConfig>,
         startup_info: StartupInfo,
         storage: Arc<Storage>,
+        flexidag_service: ServiceRef<FlexidagService>,
         vm_metrics: Option<VMMetrics>,
     ) -> Result<Self> {
         Ok(Self {
-            inner: ChainReaderServiceInner::new(config, startup_info, storage, vm_metrics)?,
+            inner: ChainReaderServiceInner::new(
+                config.clone(),
+                startup_info,
+                storage.clone(),
+                flexidag_service,
+                vm_metrics.clone(),
+            )?,
         })
     }
 }
 
 impl ServiceFactory<Self> for ChainReaderService {
@@ -55,7 +74,8 @@
             .get_startup_info()?
             .ok_or_else(|| format_err!("StartupInfo should exist at service init."))?;
         let vm_metrics = ctx.get_shared_opt::<VMMetrics>()?;
-        Self::new(config, startup_info, storage, vm_metrics)
+        let flexidag_service = ctx.service_ref::<FlexidagService>()?.clone();
+        Self::new(config, startup_info, storage, flexidag_service, vm_metrics)
     }
 }
 
@@ -72,10 +92,13 @@ impl ActorService for ChainReaderService {
 }
 
 impl EventHandler<Self, NewHeadBlock> for ChainReaderService {
-    fn handle_event(&mut self, event: NewHeadBlock, _ctx: &mut ServiceContext<Self>) {
-        let new_head = event.0.block().header();
+    fn handle_event(&mut self, event: NewHeadBlock, ctx: &mut ServiceContext<Self>) {
+        let new_head = event.0.block().header().clone();
         if let Err(e) = if self.inner.get_main().can_connect(event.0.as_ref()) {
-            self.inner.update_chain_head(event.0.as_ref().clone())
+            match self.inner.update_chain_head(event.0.as_ref().clone()) {
+                std::result::Result::Ok(_) => Ok(()),
+                Err(e) => Err(e),
+            }
         } else {
             self.inner.switch_main(new_head.id())
         } {
@@ -88,7 +111,7 @@ impl ServiceHandler<Self, ChainRequest> for ChainReaderService {
     fn handle(
         &mut self,
         msg: ChainRequest,
-        _ctx: &mut ServiceContext<Self>,
+        ctx: &mut ServiceContext<Self>,
     ) -> Result<ChainResponse> {
         match msg {
             ChainRequest::CurrentHeader() => Ok(ChainResponse::BlockHeader(Box::new(
@@ -232,6 +255,27 @@
             ChainRequest::GetBlockInfos(ids) => Ok(ChainResponse::BlockInfoVec(Box::new(
                 self.inner.get_block_infos(ids)?,
             ))),
+            ChainRequest::GetDagAccumulatorLeaves {
+                start_index,
+                batch_size,
+            } => Ok(ChainResponse::TargetDagAccumulatorLeaf(
+                self.inner
+                    .get_dag_accumulator_leaves(GetDagAccumulatorLeaves {
+                        accumulator_leaf_index: start_index,
+                        batch_size,
+                    })?,
+            )),
+            ChainRequest::GetTargetDagAccumulatorLeafDetail {
+                leaf_index,
+                batch_size,
+            } => Ok(ChainResponse::TargetDagAccumulatorLeafDetail(
+                self.inner.get_target_dag_accumulator_leaf_detail(
+                    GetTargetDagAccumulatorLeafDetail {
+                        leaf_index,
+                        batch_size,
+                    },
+                )?,
+            )),
         }
     }
 }
@@ -241,6 +285,7 @@ pub struct ChainReaderServiceInner {
     startup_info: StartupInfo,
     main: BlockChain,
     storage: Arc<Storage>,
+    flexidag_service: ServiceRef<FlexidagService>,
     vm_metrics: Option<VMMetrics>,
 }
 
@@ -249,6 +294,7 @@ impl ChainReaderServiceInner {
         config: Arc<NodeConfig>,
         startup_info: StartupInfo,
         storage: Arc<Storage>,
+        flexidag_service: ServiceRef<FlexidagService>,
         vm_metrics: Option<VMMetrics>,
     ) -> Result<Self> {
         let net = config.net();
@@ -256,6 +302,7 @@
             net.time_service(),
             startup_info.main,
             storage.clone(),
+            config.net().id().clone(),
             vm_metrics.clone(),
         )?;
         Ok(Self {
@@ -263,6 +310,7 @@
             startup_info,
             main,
             storage,
+            flexidag_service,
             vm_metrics,
         })
     }
@@ -282,10 +330,22 @@
             net.time_service(),
             new_head_id,
             self.storage.clone(),
+            self.config.net().id().clone(),
             self.vm_metrics.clone(),
         )?;
         Ok(())
     }
+
+    pub fn update_dag_accumulator(&mut self, new_block_header: BlockHeader) -> Result<()> {
+        async_std::task::block_on(self.flexidag_service.send(UpdateDagTips {
+            block_header: new_block_header,
+            current_head_block_id: self.main.status().info().id(),
+            k_total_difficulty: KTotalDifficulty {
+                head_block_id: self.main.status().info().id(),
+                total_difficulty: self.main.status().info().get_total_difficulty(),
+            },
+        }))?
+    }
 }
 
 impl ReadableChainService for ChainReaderServiceInner {
@@ -297,15 +357,54 @@
         self.storage.get_block_by_hash(hash)
     }
 
-    fn get_blocks(&self, ids: Vec<HashValue>) -> Result<Vec<Option<Block>>> {
-        self.storage.get_blocks(ids)
+    fn get_blocks(
+        &self,
+        ids: Vec<HashValue>,
+    ) -> Result<Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>> {
+        let blocks = self.storage.get_blocks(ids)?;
+        Ok(blocks
+            .into_iter()
+            .map(|block| {
+                if let Some(block) = block {
+                    let result_parents =
+                        async_std::task::block_on(self.flexidag_service.send(GetDagBlockParents {
+                            block_id: block.id(),
+                        }))
+                        .expect("failed to get the dag block parents");
+                    let parents = match result_parents {
+                        std::result::Result::Ok(parents) => parents.parents,
+                        Err(_) => panic!("failed to get parents of block {}", block.id()),
+                    };
+                    let transaction_parent = match self.storage.get_block_info(block.id()) {
+                        std::result::Result::Ok(block_info) => {
+                            if let Some(block_info) = &block_info {
+                                let block_accumulator = MerkleAccumulator::new_with_info(
+                                    block_info.block_accumulator_info.clone(),
+                                    self.storage
+                                        .get_accumulator_store(AccumulatorStoreType::Block),
+                                );
+                                block_accumulator
+                                    .get_leaf(block_info.block_accumulator_info.num_leaves - 2)
+                                    .expect("block should have transaction header")
+                            } else {
+                                None
+                            }
+                        }
+                        Err(_) => todo!(),
+                    };
+                    Some((block, Some(parents), transaction_parent))
+                } else {
+                    None
+                }
+            })
+            .collect())
     }
 
     fn get_headers(&self, ids: Vec<HashValue>) -> Result<Vec<Option<BlockHeader>>> {
         Ok(self
             .get_blocks(ids)?
             .into_iter()
-            .map(|block| block.map(|b| b.header))
+            .map(|block| block.map(|b| b.0.header))
             .collect())
     }
 
@@ -416,6 +515,43 @@
     fn get_block_infos(&self, ids: Vec<HashValue>) -> Result<Vec<Option<BlockInfo>>> {
         self.storage.get_block_infos(ids)
    }
+
+    fn get_dag_accumulator_leaves(
+        &self,
+        req: GetDagAccumulatorLeaves,
+    ) -> anyhow::Result<Vec<TargetDagAccumulatorLeaf>> {
+        Ok(async_std::task::block_on(self.flexidag_service.send(
+            flexidag_service::GetDagAccumulatorLeaves {
+                leaf_index: req.accumulator_leaf_index,
+                batch_size: req.batch_size,
+                reverse: true,
+            },
+        ))??
+        .into_iter()
+        .map(|leaf| TargetDagAccumulatorLeaf {
+            accumulator_root: leaf.dag_accumulator_root,
+            leaf_index: leaf.leaf_index,
+        })
+        .collect())
+    }
+
+    fn get_target_dag_accumulator_leaf_detail(
+        &self,
+        req: GetTargetDagAccumulatorLeafDetail,
+    ) -> anyhow::Result<Vec<TargetDagAccumulatorLeafDetail>> {
+        let dag_details =
+            async_std::task::block_on(self.flexidag_service.send(GetDagAccumulatorLeafDetail {
+                leaf_index: req.leaf_index,
+                batch_size: req.batch_size,
+            }))??;
+        Ok(dag_details
+            .into_iter()
+            .map(|detail| TargetDagAccumulatorLeafDetail {
+                accumulator_root: detail.accumulator_root,
+                tips: detail.tips,
+            })
+            .collect())
+    }
 }
 
 #[cfg(test)]
diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index 1c7825d4c7..03944e3363 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -2,7 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use crate::verifier::{BlockVerifier, FullVerifier};
-use anyhow::{bail, ensure, format_err, Result};
+use anyhow::{anyhow, bail, ensure, format_err, Ok, Result};
+use bcs_ext::BCSCodec;
 use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME};
 use starcoin_accumulator::inmemory::InMemoryAccumulator;
 use starcoin_accumulator::{
@@ -12,7 +13,9 @@ use starcoin_chain_api::{
     verify_block, ChainReader, ChainWriter, ConnectBlockError, EventWithProof, ExcludedTxns,
     ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField,
 };
-use starcoin_consensus::Consensus;
+use starcoin_config::ChainNetworkID;
+use starcoin_consensus::dag::types::ghostdata::GhostdagData;
+use starcoin_consensus::{BlockDAG, Consensus, FlexiDagStorage};
 use starcoin_crypto::hash::PlainCryptoHash;
 use starcoin_crypto::HashValue;
 use starcoin_executor::VMMetrics;
@@ -20,11 +23,15 @@ use starcoin_logger::prelude::*;
 use starcoin_open_block::OpenedBlock;
 use starcoin_state_api::{AccountStateReader, ChainStateReader, ChainStateWriter};
 use starcoin_statedb::ChainStateDB;
+use starcoin_storage::flexi_dag::SyncFlexiDagSnapshot;
+use starcoin_storage::storage::CodecKVStore;
 use starcoin_storage::Store;
 use starcoin_time_service::TimeService;
 use starcoin_types::block::BlockIdAndNumber;
 use starcoin_types::contract_event::ContractEventInfo;
+use starcoin_types::dag_block::KTotalDifficulty;
 use starcoin_types::filter::Filter;
+use starcoin_types::header::DagHeader;
 use starcoin_types::startup_info::{ChainInfo, ChainStatus};
 use starcoin_types::transaction::RichTransactionInfo;
 use starcoin_types::{
@@ -37,9 +44,11 @@
 };
 use starcoin_vm_types::access_path::AccessPath;
 use starcoin_vm_types::account_config::genesis_address;
+use starcoin_vm_types::effects::Op;
 use starcoin_vm_types::genesis_config::ConsensusStrategy;
 use starcoin_vm_types::on_chain_resource::Epoch;
 use std::cmp::min;
+use std::collections::BTreeSet;
 use std::iter::Extend;
 use std::option::Option::{None, Some};
 use std::{collections::HashMap, sync::Arc};
@@ -60,6 +69,8 @@ pub struct BlockChain {
     uncles: HashMap<HashValue, MintedUncleNumber>,
     epoch: Epoch,
     vm_metrics: Option<VMMetrics>,
+    // dag_accumulator: Option<MerkleAccumulator>,
+    net: ChainNetworkID,
 }
 
 impl BlockChain {
@@ -67,12 +78,13 @@
         time_service: Arc<dyn TimeService>,
         head_block_hash: HashValue,
         storage: Arc<dyn Store>,
+        net: ChainNetworkID,
         vm_metrics: Option<VMMetrics>,
     ) -> Result<Self> {
         let head = storage
             .get_block_by_hash(head_block_hash)?
             .ok_or_else(|| format_err!("Can not find block by hash {:?}", head_block_hash))?;
-        Self::new_with_uncles(time_service, head, None, storage, vm_metrics)
+        Self::new_with_uncles(time_service, head, None, storage, net, vm_metrics)
     }
 
     fn new_with_uncles(
@@ -80,6 +92,7 @@
         head_block: Block,
         uncles: Option<HashMap<HashValue, MintedUncleNumber>>,
         storage: Arc<dyn Store>,
+        net: ChainNetworkID,
         vm_metrics: Option<VMMetrics>,
     ) -> Result<Self> {
         let block_info = storage
@@ -94,7 +107,21 @@
         let genesis = storage
             .get_genesis()?
             .ok_or_else(|| format_err!("Can not find genesis hash in storage."))?;
+        let head_id = head_block.id();
         watch(CHAIN_WATCH_NAME, "n1253");
+
+        // let dag_accumulator = match storage.get_dag_accumulator_info(head_id)? {
+        //     Some(accumulator_info) => Some(info_2_accumulator(
+        //         accumulator_info,
+        //         AccumulatorStoreType::SyncDag,
+        //         storage.as_ref(),
+        //     )),
+        //     None => None,
+        // };
+        // let dag_snapshot_tips = storage
+        //     .get_accumulator_snapshot_storage()
+        //     .get(head_id)?
+        //     .map(|snapshot| snapshot.child_hashes);
         let mut chain = Self {
             genesis_hash: genesis,
             time_service,
@@ -113,10 +140,11 @@
                 head: head_block,
             },
             statedb: chain_state,
-            storage,
+            storage: storage.clone(),
             uncles: HashMap::new(),
             epoch,
             vm_metrics,
+            net,
         };
         watch(CHAIN_WATCH_NAME, "n1251");
         match uncles {
@@ -132,6 +160,7 @@
         storage: Arc<dyn Store>,
         genesis_epoch: Epoch,
         genesis_block: Block,
+        net: ChainNetworkID,
     ) -> Result<Self> {
         debug_assert!(genesis_block.header().is_genesis());
         let txn_accumulator = MerkleAccumulator::new_empty(
@@ -141,6 +170,7 @@
             storage.get_accumulator_store(AccumulatorStoreType::Block),
         );
         let statedb = ChainStateDB::new(storage.clone().into_super_arc(), None);
+        let genesis_id = genesis_block.header.id();
         let executed_block = Self::execute_block_and_save(
             storage.as_ref(),
             statedb,
@@ -150,14 +180,42 @@
             None,
             genesis_block,
             None,
+            None,
         )?;
+
+        let new_tips = vec![genesis_id];
+        let dag_accumulator = MerkleAccumulator::new_empty(
+            storage.get_accumulator_store(AccumulatorStoreType::SyncDag),
+        );
+        dag_accumulator.append(&new_tips)?;
+        dag_accumulator.flush()?;
+        storage.append_dag_accumulator_leaf(
+            Self::calculate_dag_accumulator_key(new_tips.clone())
+                .expect("failed to calculate the dag key"),
+            new_tips,
+            dag_accumulator.get_info(),
+            genesis_id,
+            [KTotalDifficulty {
+                head_block_id: genesis_id,
+                total_difficulty: executed_block.block_info().get_total_difficulty(),
+            }]
+            .into_iter()
+            .collect(),
+        )?;
-        Self::new(time_service, executed_block.block.id(), storage, None)
+        Self::new(time_service, executed_block.block.id(), storage, net, None)
     }
 
     pub fn current_epoch_uncles_size(&self) -> u64 {
         self.uncles.len() as u64
     }
 
+    pub fn calculate_dag_accumulator_key(mut tips: Vec<HashValue>) -> Result<HashValue> {
+        tips.sort();
+        Ok(HashValue::sha3_256_of(&tips.encode().expect(
+            "encoding the sorted relationship set must be successful",
+        )))
+    }
+
     pub fn current_block_accumulator_info(&self) -> AccumulatorInfo {
         self.block_accumulator.get_info()
     }
@@ -265,6 +323,8 @@
             difficulty,
             strategy,
             None,
+            // jacktest: TODO: this creates a single-chain block, not a dag block
+            None,
         )?;
         let excluded_txns = opened_block.push_txns(user_txns)?;
         let template = opened_block.finalize()?;
@@ -333,7 +393,7 @@
     {
         let verified_block = self.verify_with_verifier::<V>(block)?;
         watch(CHAIN_WATCH_NAME, "n1");
-        let executed_block = self.execute(verified_block)?;
+        let executed_block = self.execute(verified_block, transaction_parent)?;
         watch(CHAIN_WATCH_NAME, "n2");
         self.connect(executed_block)
     }
 
@@ -356,6 +416,7 @@
         epoch: &Epoch,
         parent_status: Option<ChainStatus>,
         block: Block,
+        transaction_parent: Option<HashValue>,
         vm_metrics: Option<VMMetrics>,
     ) -> Result<ExecutedBlock> {
         let header = block.header();
@@ -506,7 +567,6 @@
         storage.save_block_transaction_ids(block_id, txn_id_vec)?;
         storage.save_block_txn_info_ids(block_id, txn_info_ids)?;
         storage.commit_block(block.clone())?;
-        storage.save_block_info(block_info.clone())?;
 
         storage.save_table_infos(txn_table_infos)?;
 
@@ -526,10 +586,15 @@ impl ChainReader for BlockChain {
     fn info(&self) -> ChainInfo {
+        let (dag_accumulator, k_total_difficulties) = self.storage.get_lastest_snapshot()?.map(|snapshot| {
+            (Some(snapshot.accumulator_info), Some(snapshot.k_total_difficulties))
+        }).unwrap_or((None, None));
         ChainInfo::new(
             self.status.head.header().chain_id(),
             self.genesis_hash,
             self.status.status.clone(),
+            dag_accumulator,
+            k_total_difficulties,
         )
     }
 
@@ -545,6 +610,10 @@
         self.status.status.head().clone()
     }
 
+    fn net_id(&self) -> ChainNetworkID {
+        self.net.clone()
+    }
+
     fn get_header(&self, hash: HashValue) -> Result<Option<BlockHeader>> {
         self.storage
             .get_block_header_by_hash(hash)
@@ -573,13 +642,12 @@
         reverse: bool,
         count: u64,
     ) -> Result<Vec<Block>> {
+        let num_leaves = self.block_accumulator.num_leaves();
         let end_num = match number {
-            None => self.current_header().number(),
+            None => num_leaves.saturating_sub(1),
             Some(number) => number,
         };
 
-        let num_leaves = self.block_accumulator.num_leaves();
-
         if end_num > num_leaves.saturating_sub(1) {
             bail!("Can not find block by number {}", end_num);
         };
@@ -715,12 +783,15 @@
         } else {
             None
         };
+
         BlockChain::new_with_uncles(
             self.time_service.clone(),
             head,
             uncles,
             self.storage.clone(),
+            self.net.clone(),
             self.vm_metrics.clone(),
+            // TODO: check whether missing blocks need to be cleaned
         )
     }
 
@@ -755,7 +826,11 @@
         FullVerifier::verify_block(self, block)
     }
 
-    fn execute(&self, verified_block: VerifiedBlock) -> Result<ExecutedBlock> {
+    fn execute(
+        &self,
+        verified_block: VerifiedBlock,
+        transaction_parent: Option<HashValue>,
+    ) -> Result<ExecutedBlock> {
         Self::execute_block_and_save(
             self.storage.as_ref(),
             self.statedb.fork(),
@@ -764,6 +839,7 @@
             &self.epoch,
             Some(self.status.status.clone()),
             verified_block.0,
+            transaction_parent,
             self.vm_metrics.clone(),
         )
     }
@@ -972,9 +1048,13 @@ impl BlockChain {
 impl ChainWriter for BlockChain {
     fn can_connect(&self, executed_block: &ExecutedBlock) -> bool {
-        executed_block.block.header().parent_hash() == self.status.status.head().id()
+        if executed_block.block.header().parent_hash() == self.status.status.head().id() {
+            return true;
+        } else {
+            // jacktest: TODO: check whether the parents are in the dag
+            return true;
+        }
     }
-
     fn connect(&mut self, executed_block: ExecutedBlock) -> Result<ExecutedBlock> {
         let (block, block_info) = (executed_block.block(), executed_block.block_info());
         debug_assert!(block.header().parent_hash() == self.status.status.head().id());
diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs
index 5128715302..eb8ed93d29 100644
--- a/chain/src/verifier/mod.rs
+++ b/chain/src/verifier/mod.rs
@@ -2,11 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use anyhow::{format_err, Result};
+use bcs_ext::BCSCodec;
 use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME};
 use starcoin_chain_api::{
     verify_block, ChainReader, ConnectBlockError, VerifiedBlock, VerifyBlockField,
 };
 use starcoin_consensus::{Consensus, ConsensusVerifyError};
+use starcoin_crypto::HashValue;
 use starcoin_logger::prelude::debug;
 use starcoin_types::block::{Block, BlockHeader, ALLOWED_FUTURE_BLOCKTIME};
 use std::{collections::HashSet, str::FromStr};
@@ -179,6 +181,36 @@
         let current_id = current.id();
         let expect_number = current.number().saturating_add(1);
 
+        // dag
+        // jacktest: TODO: the verifying should be modified!!!
+        // if chain_status.tips_hash.is_some() {
+        //     let mut tips_hash = chain_status.tips_hash.clone().unwrap();
+        //     tips_hash.sort();
+
+        //     // if it is a dag block
+        //     if HashValue::sha3_256_of(&tips_hash.encode().expect("hash encode must be successful"))
+        //         != new_block_parent
+        //     {
+        //         // or a block of a single chain
+        //         verify_block!(
+        //             VerifyBlockField::Header,
+        //             expect_number == new_block_header.number(),
+        //             "Invalid block: Unexpect block number, expect:{}, got: {}.",
+        //             expect_number,
+        //             new_block_header.number()
+        //         );
+
+        //         verify_block!(
+        //             VerifyBlockField::Header,
+        //             current_id == new_block_parent,
+        //             "Invalid block: Parent id mismatch, expect:{}, got: {}, number:{}.",
+        //             current_id,
+        //             new_block_parent,
+        //             new_block_header.number()
+        //         );
+        //     }
+        // } else {
+        // or a block of a single chain
         verify_block!(
             VerifyBlockField::Header,
             expect_number == new_block_header.number(),
@@ -195,7 +227,7 @@
             new_block_parent,
             new_block_header.number()
         );
-
+        // }
         verify_block!(
             VerifyBlockField::Header,
             new_block_header.timestamp() > current.timestamp(),
diff --git a/chain/tests/block_test_utils.rs b/chain/tests/block_test_utils.rs
index f6d7016c26..34ae965304 100644
--- a/chain/tests/block_test_utils.rs
+++ b/chain/tests/block_test_utils.rs
@@ -79,6 +79,7 @@ fn gen_header(
         parent_header.chain_id(),
         0,
         BlockHeaderExtra::new([0u8; 4]),
+        None,
     )
 }
 
diff --git a/chain/tests/test_block_chain.rs b/chain/tests/test_block_chain.rs
index 7b1d41411b..f300d279e3 100644
--- a/chain/tests/test_block_chain.rs
+++ b/chain/tests/test_block_chain.rs
@@ -200,7 +200,7 @@ fn gen_uncle() -> (MockChain, BlockChain, BlockHeader) {
     let miner = mock_chain.miner();
     let block = product_a_block(&fork_block_chain, miner, Vec::new());
     let uncle_block_header = block.header().clone();
-    fork_block_chain.apply(block).unwrap();
+    fork_block_chain.apply(block, None).unwrap();
     (mock_chain, fork_block_chain, uncle_block_header)
 }
 
@@ -293,7 +293,7 @@ fn test_switch_epoch() {
     // 3. mock chain apply
     let uncles = vec![uncle_block_header.clone()];
     let block = product_a_block(mock_chain.head(), &miner, uncles);
-    mock_chain.apply(block).unwrap();
+    mock_chain.apply(block, None).unwrap();
     assert!(mock_chain.head().head_block().block.uncles().is_some());
     assert!(mock_chain
         .head()
@@ -311,14 +311,14 @@
     if begin_number < (end_number - 1) {
         for _i in begin_number..(end_number - 1) {
             let block = product_a_block(mock_chain.head(), &miner, Vec::new());
-            mock_chain.apply(block).unwrap();
+            mock_chain.apply(block, None).unwrap();
             assert_eq!(mock_chain.head().current_epoch_uncles_size(), 1);
         }
     }
 
     // 5. switch epoch
     let block = product_a_block(mock_chain.head(), &miner, Vec::new());
-    mock_chain.apply(block).unwrap();
+    mock_chain.apply(block, None).unwrap();
     assert!(mock_chain.head().head_block().block.uncles().is_none());
     assert_eq!(mock_chain.head().current_epoch_uncles_size(), 0);
 }
@@ -336,14 +336,14 @@ fn test_uncle_in_diff_epoch() {
     if begin_number < (end_number - 1) {
         for _i in begin_number..(end_number - 1) {
             let block = product_a_block(mock_chain.head(), &miner, Vec::new());
-            mock_chain.apply(block).unwrap();
+            mock_chain.apply(block, None).unwrap();
             assert_eq!(mock_chain.head().current_epoch_uncles_size(), 0);
         }
     }
 
     // 4. switch epoch
     let block = product_a_block(mock_chain.head(), &miner, Vec::new());
-    mock_chain.apply(block).unwrap();
+    mock_chain.apply(block, None).unwrap();
     assert!(mock_chain.head().head_block().block.uncles().is_none());
     assert_eq!(mock_chain.head().current_epoch_uncles_size(), 0);
 
@@ -373,7 +373,7 @@ fn test_block_chain_txn_info_fork_mapping() -> Result<()> {
     let block_b1 = block_chain
         .consensus()
         .create_block(template_b1, config.net().time_service().as_ref())?;
-    block_chain.apply(block_b1.clone())?;
+    block_chain.apply(block_b1.clone(), None, &mut None)?;
 
     let mut block_chain2 = block_chain.fork(block_b1.id()).unwrap();
 
@@ -404,7 +404,7 @@
         .consensus()
         .create_block(template_b2, config.net().time_service().as_ref())?;
 
-    block_chain.apply(block_b2.clone())?;
+    block_chain.apply(block_b2.clone(), None, &mut None)?;
     let (template_b3, excluded) = block_chain2.create_block_template(
         *miner_account.address(),
         Some(block_b1.id()),
@@ -416,7 +416,7 @@
     let block_b3 = block_chain2
         .consensus()
         .create_block(template_b3, config.net().time_service().as_ref())?;
-    block_chain2.apply(block_b3.clone())?;
+    block_chain2.apply(block_b3.clone(), None, &mut None)?;
 
     assert_ne!(
         block_chain.get_txn_accumulator().root_hash(),
diff --git a/chain/tests/test_epoch_switch.rs b/chain/tests/test_epoch_switch.rs
index 48143c3e9f..cf0b71d2ab 100644
--- a/chain/tests/test_epoch_switch.rs
+++ b/chain/tests/test_epoch_switch.rs
@@ -203,7 +203,7 @@ pub fn modify_on_chain_config_by_dao_block(
             .consensus()
             .create_block(template, chain.time_service().as_ref())?;
 
-        chain.apply(block1)?;
+        chain.apply(block1, None)?;
     }
 
     // block 2
@@ -223,7 +223,7 @@
             block_timestamp / 1000,
         )],
     )?;
-    chain.apply(block2)?;
+    chain.apply(block2, None)?;
 
     let chain_state = chain.chain_state();
     let state = proposal_state(
@@ -255,7 +255,7 @@
                 block_timestamp / 1000,
             )],
        )?;
-        chain.apply(block3)?;
+        chain.apply(block3, None)?;
     }
     // block 4
     let chain_state = chain.chain_state();
@@ -263,7 +263,7 @@
     {
         chain.time_service().adjust(block_timestamp);
         let block4 = create_new_block(&chain, &alice, vec![])?;
-        chain.apply(block4)?;
+        chain.apply(block4, None)?;
         let chain_state = chain.chain_state();
         let quorum = quorum_vote(chain_state, stc_type_tag());
         println!("quorum: {}", quorum);
@@ -282,7 +282,7 @@
     let block_timestamp = block_timestamp + 20 * 1000;
     {
         chain.time_service().adjust(block_timestamp);
-        chain.apply(create_new_block(&chain, &alice, vec![])?)?;
+        chain.apply(create_new_block(&chain, &alice, vec![])?, None)?;
         let chain_state = chain.chain_state();
         let state = proposal_state(
             chain_state,
@@ -311,7 +311,7 @@
             block_timestamp / 1000,
         )],
     )?;
-    chain.apply(block6)?;
+    chain.apply(block6, None)?;
     let chain_state = chain.chain_state();
     let state = proposal_state(
         chain_state,
@@ -328,7 +328,7 @@
     let block_timestamp = block_timestamp + min_action_delay(chain_state, stc_type_tag());
     {
         chain.time_service().adjust(block_timestamp);
-        chain.apply(create_new_block(&chain, &alice, vec![])?)?;
+        chain.apply(create_new_block(&chain, &alice, vec![])?, None)?;
         let chain_state = chain.chain_state();
         let state = proposal_state(
             chain_state,
@@ -353,7 +353,7 @@
             block_timestamp / 1000,
         )],
     )?;
-    chain.apply(block8)?;
+    chain.apply(block8, None)?;
     }
 
     // block 9
@@ -361,7 +361,7 @@
     let _chain_state = chain.chain_state();
     {
         chain.time_service().adjust(block_timestamp);
-        chain.apply(create_new_block(&chain, &alice, vec![])?)?;
+        chain.apply(create_new_block(&chain, &alice, vec![])?, None)?;
         let chain_state = chain.chain_state();
         let state = proposal_state(
             chain_state,
diff --git a/chain/tests/test_opened_block.rs b/chain/tests/test_opened_block.rs
index 33c922ba6b..b6c741bb6f 100644
--- a/chain/tests/test_opened_block.rs
+++ b/chain/tests/test_opened_block.rs
@@ -31,6 +31,7 @@ pub fn test_open_block() -> Result<()> {
             U256::from(0),
             chain.consensus(),
             None,
+            None,
        )?
     };
diff --git a/chain/tests/test_txn_info_and_proof.rs b/chain/tests/test_txn_info_and_proof.rs
index d817366953..c057ef9f2b 100644
--- a/chain/tests/test_txn_info_and_proof.rs
+++ b/chain/tests/test_txn_info_and_proof.rs
@@ -70,9 +70,9 @@ fn test_transaction_info_and_proof() -> Result<()> {
             .consensus()
             .create_block(template, config.net().time_service().as_ref())
             .unwrap();
-        block_chain.apply(block.clone()).unwrap();
+        block_chain.apply(block.clone(), None, &mut None).unwrap();
         all_txns.push(Transaction::BlockMetadata(
-            block.to_metadata(current_header.gas_used()),
+            block.to_metadata(current_header.gas_used(), None),
         ));
         all_txns.extend(txns.into_iter().map(Transaction::UserTransaction));
         current_header = block.header().clone();
diff --git a/cmd/db-exporter/src/main.rs b/cmd/db-exporter/src/main.rs
index d0bf1688fb..4a7635c651 100644
--- a/cmd/db-exporter/src/main.rs
+++ b/cmd/db-exporter/src/main.rs
@@ -634,8 +634,14 @@ pub fn export_block_range(
     ))?);
     let (chain_info, _) =
         Genesis::init_and_check_storage(&net, storage.clone(), from_dir.as_ref())?;
-    let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)
-        .expect("create block chain should success.");
+    let chain = BlockChain::new(
+        net.time_service(),
+        chain_info.head().id(),
+        storage,
+        net.id().clone(),
+        None,
+    )
+    .expect("create block chain should success.");
     let cur_num = chain.status().head().number();
     let end = if cur_num > end + BLOCK_GAP {
         end
@@ -716,6 +722,7 @@ pub fn apply_block(
         net.time_service(),
         chain_info.head().id(),
         storage.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -757,10 +764,10 @@
     let block_hash = block.header().id();
     let block_number = block.header().number();
     match verifier {
-        Verifier::Basic => chain.apply_with_verifier::<BasicVerifier>(block)?,
-        Verifier::Consensus => chain.apply_with_verifier::<ConsensusVerifier>(block)?,
-        Verifier::Full => chain.apply_with_verifier::<FullVerifier>(block)?,
-        Verifier::None => chain.apply_with_verifier::<NoneVerifier>(block)?,
+        Verifier::Basic => chain.apply_with_verifier::<BasicVerifier>(block, None)?,
+        Verifier::Consensus => chain.apply_with_verifier::<ConsensusVerifier>(block, None)?,
+        Verifier::Full => chain.apply_with_verifier::<FullVerifier>(block, None)?,
+        Verifier::None => chain.apply_with_verifier::<NoneVerifier>(block, None)?,
     };
     // apply block then flush startup_info for breakpoint resume
     let startup_info = StartupInfo::new(block_hash);
@@ -772,7 +779,7 @@
     let use_time = SystemTime::now().duration_since(start_time)?;
     println!("apply block use time: {:?}", use_time.as_secs());
     let chain_info = storage
-        .get_chain_info()?
+        .get_chain_info(net.id().clone())?
         .ok_or_else(|| format_err!("{}", "get chain_info error"))?;
     println!("chain_info {}", chain_info);
     Ok(())
 }
@@ -794,6 +801,7 @@ pub fn startup_info_back(
         net.time_service(),
         chain_info.head().id(),
         storage.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -839,6 +847,7 @@ pub fn gen_block_transactions(
         net.time_service(),
         chain_info.head().id(),
         storage.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -940,7 +949,7 @@ pub fn execute_transaction_with_create_account(
         println!("trans {}", block.transactions().len());
     }
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
 
@@ -963,7 +972,7 @@ pub fn execute_transaction_with_miner_create_account(
     let block =
         ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?;
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
     for _i in 0..block_num {
@@ -992,7 +1001,7 @@
         }
         send_sequence += block.transactions().len() as u64;
         let block_hash = block.header.id();
-        chain.apply_with_verifier::<BasicVerifier>(block)?;
+        chain.apply_with_verifier::<BasicVerifier>(block, None)?;
         let startup_info = StartupInfo::new(block_hash);
         storage.save_startup_info(startup_info)?;
 
@@ -1015,7 +1024,7 @@ pub fn execute_empty_transaction_with_miner(
     let block =
         ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?;
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
     for _i in 0..block_num {
@@ -1042,7 +1051,7 @@
         }
         send_sequence += block.transactions().len() as u64;
         let block_hash = block.header.id();
-        chain.apply_with_verifier::<BasicVerifier>(block)?;
+        chain.apply_with_verifier::<BasicVerifier>(block, None)?;
         let startup_info = StartupInfo::new(block_hash);
         storage.save_startup_info(startup_info)?;
 
@@ -1066,7 +1075,7 @@ pub fn execute_transaction_with_fixed_account(
     let block =
         ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?;
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
     for _i in 0..block_num {
@@ -1094,7 +1103,7 @@
         }
         send_sequence += block.transactions().len() as u64;
         let block_hash = block.header.id();
-        chain.apply_with_verifier::<BasicVerifier>(block)?;
+        chain.apply_with_verifier::<BasicVerifier>(block, None)?;
         let startup_info = StartupInfo::new(block_hash);
         storage.save_startup_info(startup_info)?;
 
@@ -1150,7 +1159,7 @@ pub fn execute_turbo_stm_transaction_with_fixed_account(
         ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?;
     println!("create account trans {}", block.transactions().len());
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
     println!("receivers finish");
@@ -1177,7 +1186,7 @@ pub fn execute_turbo_stm_transaction_with_fixed_account(
         ConsensusStrategy::Dummy.create_block(block_template, net.time_service().as_ref())?;
     println!("p2p trans {}", block.transactions().len());
     let block_hash = block.header.id();
-    chain.apply_with_verifier::<BasicVerifier>(block)?;
+    chain.apply_with_verifier::<BasicVerifier>(block, None)?;
     let startup_info = StartupInfo::new(block_hash);
     storage.save_startup_info(startup_info)?;
 
@@ -1295,6 +1304,7 @@ pub fn export_snapshot(
         net.time_service(),
         chain_info.head().id(),
         storage.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -1313,8 +1323,14 @@
     let cur_block = chain
         .get_block_by_number(cur_num)?
         .ok_or_else(|| format_err!("get block by number {} error", cur_num))?;
-    let chain = BlockChain::new(net.time_service(), cur_block.id(), storage.clone(), None)
-        .expect("create block chain should success.");
+    let chain = BlockChain::new(
+        net.time_service(),
+        cur_block.id(),
+        storage.clone(),
+        net.id().clone(),
+        None,
+    )
+    .expect("create block chain should success.");
 
     let cur_num = chain.epoch().start_block_number();
 
@@ -1636,6 +1652,7 @@ pub fn apply_snapshot(
             net.time_service(),
             chain_info.head().id(),
             storage.clone(),
+            net.id().clone(),
             None,
         )
         .expect("create block chain should success."),
@@ -1969,6 +1986,7 @@ pub fn gen_turbo_stm_transactions(to_dir: PathBuf, block_num: Option<u64>) -> anyhow::Result<()> {
         net.time_service(),
         chain_info.head().id(),
         storage.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -1995,6 +2013,7 @@ pub fn apply_turbo_stm_block(
         net.time_service(),
         chain_info_seq.head().id(),
         storage_seq.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -2024,14 +2043,14 @@
 
     println!("seq execution");
     for item in blocks.iter().take(4) {
-        chain_seq.apply_with_verifier::<BasicVerifier>(item.clone())?;
+        chain_seq.apply_with_verifier::<BasicVerifier>(item.clone(), None)?;
     }
     let mut block_hash = HashValue::zero();
     let start_time = SystemTime::now();
     for item in blocks.iter().skip(4) {
         let block = item.clone();
         block_hash = block.header().id();
-        chain_seq.apply_with_verifier::<BasicVerifier>(block)?;
+        chain_seq.apply_with_verifier::<BasicVerifier>(block, None)?;
     }
     let startup_info = StartupInfo::new(block_hash);
     storage_seq.save_startup_info(startup_info)?;
@@ -2053,13 +2072,14 @@
         net.time_service(),
         chain_info_stm.head().id(),
         storage_stm.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
 
     println!("stm execution");
     for item in blocks.iter().take(4) {
-        chain_stm.apply_with_verifier::<BasicVerifier>(item.clone())?;
+        chain_stm.apply_with_verifier::<BasicVerifier>(item.clone(), None)?;
     }
     let mut block_hash = HashValue::zero();
     let start_time = SystemTime::now();
 
@@ -2067,7 +2087,7 @@
     for item in blocks.iter().skip(4) {
         let block = item.clone();
         block_hash = block.header().id();
-        chain_stm.apply_with_verifier::<BasicVerifier>(block)?;
+        chain_stm.apply_with_verifier::<BasicVerifier>(block, None)?;
     }
     let startup_info = StartupInfo::new(block_hash);
     storage_stm.save_startup_info(startup_info)?;
diff --git a/cmd/peer-watcher/src/lib.rs b/cmd/peer-watcher/src/lib.rs
index 0defa9ba3e..bb75a86819 100644
--- a/cmd/peer-watcher/src/lib.rs
+++ b/cmd/peer-watcher/src/lib.rs
@@ -9,6 +9,7 @@ use starcoin_network::network_p2p_handle::Networkp2pHandle;
 use starcoin_network::{build_network_worker, NotificationMessage};
 use starcoin_storage::storage::StorageInstance;
 use starcoin_storage::Storage;
+use starcoin_types::startup_info::ChainInfo;
 use std::sync::Arc;
pub fn build_lighting_network(
@@ -17,10 +18,17 @@ pub fn build_lighting_network(
 ) -> Result<(PeerInfo, NetworkWorker<Networkp2pHandle>)> {
     let genesis = starcoin_genesis::Genesis::load_or_build(net)?;
     let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?);
-    let chain_info = genesis.execute_genesis_block(net, storage)?;
+    let chain_info = genesis.execute_genesis_block(net, storage.clone())?;
+    let chain_state_info = ChainInfo::new(
+        chain_info.chain_id(),
+        chain_info.genesis_hash(),
+        chain_info.status().clone(),
+        chain_info.dag_accumulator_info().clone(),
+        chain_info.k_total_difficulties().clone(),
+    );
     build_network_worker(
         network_config,
-        chain_info,
+        chain_state_info,
         NotificationMessage::protocols(),
         None,
         None,
diff --git a/cmd/peer-watcher/src/main.rs b/cmd/peer-watcher/src/main.rs
index 7d455f311a..9251a9ef74 100644
--- a/cmd/peer-watcher/src/main.rs
+++ b/cmd/peer-watcher/src/main.rs
@@ -32,9 +32,9 @@ fn main() {
             rpc_protocols,
             version_string,
         } => match ChainInfo::decode(&generic_data) {
-            Ok(chain_info) => Some(PeerInfo::new(
+            Ok(chain_state_info) => Some(PeerInfo::new(
                 remote.into(),
-                chain_info,
+                chain_state_info,
                 notif_protocols,
                 rpc_protocols,
                 version_string,
diff --git a/cmd/replay/src/main.rs b/cmd/replay/src/main.rs
index d391c78fa3..7a06f5ff0e 100644
--- a/cmd/replay/src/main.rs
+++ b/cmd/replay/src/main.rs
@@ -80,8 +80,14 @@ fn main() -> anyhow::Result<()> {
     );
     let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), from_dir.as_ref())
        .expect("init storage by genesis fail.");
-    let chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)
-        .expect("create block chain should success.");
+    let chain = BlockChain::new(
+        net.time_service(),
+        chain_info.head().id(),
+        storage,
+        net.id().clone(),
+        None,
+    )
+    .expect("create block chain should success.");

     let storage2 = Arc::new(
         Storage::new(StorageInstance::new_cache_and_db_instance(
@@ -97,6 +103,7 @@ fn main() -> anyhow::Result<()> {
         net.time_service(),
         chain_info2.status().head().id(),
         storage2.clone(),
+        net.id().clone(),
         None,
     )
     .expect("create block chain should success.");
@@ -132,18 +139,24 @@ fn main() -> anyhow::Result<()> {
         let block_height = block.header().number();
         match opts.verifier {
             Verifier::Basic => {
-                chain2.apply_with_verifier::<BasicVerifier>(block).unwrap();
+                chain2
+                    .apply_with_verifier::<BasicVerifier>(block, None)
+                    .unwrap();
             }
             Verifier::Consensus => {
                 chain2
-                    .apply_with_verifier::<ConsensusVerifier>(block)
+                    .apply_with_verifier::<ConsensusVerifier>(block, None)
                     .unwrap();
             }
             Verifier::None => {
-                chain2.apply_with_verifier::<NoneVerifier>(block).unwrap();
+                chain2
+                    .apply_with_verifier::<NoneVerifier>(block, None)
+                    .unwrap();
             }
             Verifier::Full => {
-                chain2.apply_with_verifier::<FullVerifier>(block).unwrap();
+                chain2
+                    .apply_with_verifier::<FullVerifier>(block, None)
+                    .unwrap();
             }
         };
         println!(
diff --git a/commons/accumulator/src/node.rs b/commons/accumulator/src/node.rs
index f3d05cd29c..36d8138eea 100644
--- a/commons/accumulator/src/node.rs
+++ b/commons/accumulator/src/node.rs
@@ -16,6 +16,7 @@ use starcoin_crypto::{
 pub enum AccumulatorStoreType {
     Transaction,
     Block,
+    SyncDag,
 }

 #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
diff --git a/commons/stream-task/src/collector.rs b/commons/stream-task/src/collector.rs
index 3e597fce95..cd0e317bbd 100644
--- a/commons/stream-task/src/collector.rs
+++ b/commons/stream-task/src/collector.rs
@@ -15,7 +15,7 @@ use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::Arc;
 use thiserror::Error;

-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum CollectorState {
     /// Collector is enough, do not feed more item, finish task.
     Enough,
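// --- Editor's note: illustrative sketch, not part of the diff. ---
// Deriving `PartialEq` lets callers compare collector states directly instead
// of matching on them, e.g. (the surrounding collector variable and call site
// are assumptions, not code from this PR):
//
//     if collector.collect(item)? == CollectorState::Enough {
//         // stop feeding the stream task
//     }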
diff --git a/commons/time-service/src/lib.rs b/commons/time-service/src/lib.rs
index 9642726974..15dc51b178 100644
--- a/commons/time-service/src/lib.rs
+++ b/commons/time-service/src/lib.rs
@@ -163,6 +163,62 @@ impl TimeService for MockTimeService {
     }
 }

+#[derive(Debug)]
+pub struct DagBlockTimeWindowService {
+    time_service: Arc<dyn TimeService>,
+
+    time_window: u64,
+}
+
+pub enum TimeWindowResult {
+    InTimeWindow,
+    BeforeTimeWindow,
+    AfterTimeWindow,
+}
+
+impl TimeService for DagBlockTimeWindowService {
+    fn adjust(&self, milliseconds: u64) {
+        self.time_service.adjust(milliseconds)
+    }
+
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn now_secs(&self) -> u64 {
+        self.time_service.now_secs()
+    }
+
+    fn now_millis(&self) -> u64 {
+        self.time_service.now_millis()
+    }
+
+    fn sleep(&self, millis: u64) {
+        self.time_service.sleep(millis)
+    }
+}
+
+impl DagBlockTimeWindowService {
+    pub fn new(time_windows: u64, time_service: Arc<dyn TimeService>) -> Self {
+        Self {
+            time_service: time_service.clone(),
+            time_window: time_windows,
+        }
+    }
+
+    pub fn is_in_time_window(&self, block_timestamp: u64) -> TimeWindowResult {
+        let now = self.time_service.now_millis();
+        let start_time = now - now % self.time_window;
+        let end_time = start_time + self.time_window;
+        if (start_time..end_time).contains(&block_timestamp) {
+            TimeWindowResult::InTimeWindow
+        } else if block_timestamp < start_time {
+            TimeWindowResult::BeforeTimeWindow
+        } else {
+            TimeWindowResult::AfterTimeWindow
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/config/src/available_port.rs b/config/src/available_port.rs
index 588b28ad81..f03bf1af60 100644
--- a/config/src/available_port.rs
+++ b/config/src/available_port.rs
@@ -57,7 +57,7 @@ fn get_ephemeral_port() -> ::std::io::Result<u16> {
     use std::net::{TcpListener, TcpStream};

     // Request a random available port from the OS
-    let listener = TcpListener::bind(("localhost", 0))?;
+    let listener = TcpListener::bind(("127.0.0.1", 0))?;
     let addr = listener.local_addr()?;

     // Create and accept a connection (which we'll promptly drop) in order to force the port
diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs
index f553cb5013..bc60c1fe3c 100644
--- a/config/src/genesis_config.rs
+++ b/config/src/genesis_config.rs
@@ -777,6 +777,7 @@ pub static G_DEV_CONFIG: Lazy<GenesisConfig> = Lazy::new(|| {
     GenesisConfig {
         genesis_block_parameter: GenesisBlockParameterConfig::Static(GenesisBlockParameter {
             parent_hash: HashValue::sha3_256_of(b"starcoin_dev"),
+            // parent_hash: HashValue::from_slice(ORIGIN).expect("hash should be ok"),
             timestamp: 0,
             difficulty: 1.into(),
         }),
@@ -832,7 +833,7 @@ pub static G_HALLEY_CONFIG: Lazy<GenesisConfig> = Lazy::new(|| {
     GenesisConfig {
         genesis_block_parameter: GenesisBlockParameterConfig::Static(GenesisBlockParameter {
-            parent_hash: HashValue::sha3_256_of(b"starcoin_halley"),
+            parent_hash: HashValue::new(starcoin_types::blockhash::ORIGIN),
             timestamp: 1693798675000,
             difficulty: 100.into(),
         }),
diff --git a/config/src/storage_config.rs b/config/src/storage_config.rs
index 38634026e0..1b33e18134 100644
--- a/config/src/storage_config.rs
+++ b/config/src/storage_config.rs
@@ -34,6 +34,13 @@ pub struct RocksdbConfig {
     pub wal_bytes_per_sync: u64,
     #[clap(name = "rocksdb-bytes-per-sync", long, help = "rocksdb bytes per sync")]
     pub bytes_per_sync: u64,
+
+    #[clap(
+        name = "rocksdb-parallelism",
+        long,
+        help = "rocksdb background threads, one by default"
+    )]
+    pub parallelism: u64,
 }
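// --- Editor's note: illustrative sketch, not part of the diff. ---
// The new `parallelism` field is a background-thread knob for flush and
// compaction. In the rust-rocksdb bindings this maps naturally onto
// `Options::increase_parallelism`; whether starcoin-storage wires it exactly
// this way is an assumption of this sketch:
fn db_options_from(config: &RocksdbConfig) -> rocksdb::Options {
    let mut opts = rocksdb::Options::default();
    // Spread flush/compaction work across the configured thread count.
    opts.increase_parallelism(config.parallelism as i32);
    opts.set_bytes_per_sync(config.bytes_per_sync);
    opts.set_wal_bytes_per_sync(config.wal_bytes_per_sync);
    opts
}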
impl RocksdbConfig {
@@ -61,6 +68,8 @@ impl Default for RocksdbConfig {
             bytes_per_sync: 1u64 << 20,
             // For wal sync every size to be 1MB
             wal_bytes_per_sync: 1u64 << 20,
+            // For background threads
+            parallelism: 1u64,
         }
     }
 }
@@ -102,6 +111,14 @@ pub struct StorageConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     #[clap(name = "rocksdb-bytes-per-sync", long, help = "rocksdb bytes per sync")]
     pub bytes_per_sync: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[clap(
+        name = "rocksdb-parallelism",
+        long,
+        help = "rocksdb background threads"
+    )]
+    pub parallelism: Option<u64>,
 }

 impl StorageConfig {
@@ -124,6 +141,7 @@ impl StorageConfig {
             wal_bytes_per_sync: self
                 .wal_bytes_per_sync
                 .unwrap_or(default.wal_bytes_per_sync),
+            parallelism: self.parallelism.unwrap_or(default.parallelism),
         }
     }
     pub fn cache_size(&self) -> usize {
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml
index 2810b14110..3c6203ffb3 100644
--- a/consensus/Cargo.toml
+++ b/consensus/Cargo.toml
@@ -19,11 +19,21 @@ starcoin-time-service = { workspace = true }
 starcoin-types = { workspace = true }
 starcoin-vm-types = { workspace = true }
 thiserror = { workspace = true }
+rocksdb = { workspace = true }
+bincode = { workspace = true }
+serde = { workspace = true }
+starcoin-storage = { workspace = true }
+parking_lot = { workspace = true }
+itertools = { workspace = true }
+starcoin-config = { workspace = true }
+bcs-ext = { workspace = true }
+starcoin-accumulator = { package = "starcoin-accumulator", workspace = true }

 [dev-dependencies]
 proptest = { workspace = true }
 proptest-derive = { workspace = true }
 stest = { workspace = true }
+tempfile = { workspace = true }

 [features]
 default = []
diff --git a/consensus/src/consensus.rs b/consensus/src/consensus.rs
index 421b26bdcb..dccae700ce 100644
--- a/consensus/src/consensus.rs
+++ b/consensus/src/consensus.rs
@@ -75,11 +75,23 @@ pub trait Consensus {
         block_template: BlockTemplate,
         time_service: &dyn TimeService,
     ) -> Result<Block> {
-        let mining_hash = block_template.as_pow_header_blob();
+        let mining_hash = block_template.as_pow_header_blob_single_chain();
         let consensus_nonce =
             self.solve_consensus_nonce(&mining_hash, block_template.difficulty, time_service);
         let extra = BlockHeaderExtra::new([0u8; 4]);
-        Ok(block_template.into_block(consensus_nonce, extra))
+        Ok(block_template.into_single_chain_block(consensus_nonce, extra))
+    }
+
+    fn create_single_chain_block(
+        &self,
+        block_template: BlockTemplate,
+        time_service: &dyn TimeService,
+    ) -> Result<Block> {
+        let mining_hash = block_template.as_pow_header_blob_single_chain();
+        let consensus_nonce =
+            self.solve_consensus_nonce(&mining_hash, block_template.difficulty, time_service);
+        let extra = BlockHeaderExtra::new([0u8; 4]);
+        Ok(block_template.into_single_chain_block(consensus_nonce, extra))
     }

     /// Inner helper for verify and unit testing
     fn verify_header_difficulty(&self, difficulty: U256, header: &BlockHeader) -> Result<()> {
diff --git a/consensus/src/consensus_test.rs b/consensus/src/consensus_test.rs
index b878fa9e10..e0d52d7606 100644
--- a/consensus/src/consensus_test.rs
+++ b/consensus/src/consensus_test.rs
@@ -89,6 +89,7 @@ fn verify_header_test_barnard_block3_ubuntu22() {
         ChainId::new(251),
         2894404328,
         BlockHeaderExtra::new([0u8; 4]),
+        None,
     );
     G_CRYPTONIGHT
         .verify_header_difficulty(header.difficulty(), &header)
diff --git a/consensus/src/consensusdb/access.rs b/consensus/src/consensusdb/access.rs
new file mode 100644
index 0000000000..e46e85acfe
--- /dev/null
+++
b/consensus/src/consensusdb/access.rs @@ -0,0 +1,199 @@ +use super::{cache::DagCache, db::DBStorage, error::StoreError}; + +use super::prelude::DbWriter; +use super::schema::{KeyCodec, Schema, ValueCodec}; +use itertools::Itertools; +use rocksdb::{Direction, IteratorMode, ReadOptions}; +use starcoin_storage::storage::RawDBStorage; +use std::{ + collections::hash_map::RandomState, error::Error, hash::BuildHasher, marker::PhantomData, + sync::Arc, +}; + +/// A concurrent DB store access with typed caching. +#[derive(Clone)] +pub struct CachedDbAccess { + db: Arc, + + // Cache + cache: DagCache, + + _phantom: PhantomData, +} + +impl CachedDbAccess +where + R: BuildHasher + Default, +{ + pub fn new(db: Arc, cache_size: u64) -> Self { + Self { + db, + cache: DagCache::new_with_capacity(cache_size), + _phantom: Default::default(), + } + } + + pub fn read_from_cache(&self, key: S::Key) -> Option { + self.cache.get(&key) + } + + pub fn has(&self, key: S::Key) -> Result { + Ok(self.cache.contains_key(&key) + || self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? + .is_some()) + } + + pub fn read(&self, key: S::Key) -> Result { + if let Some(data) = self.cache.get(&key) { + Ok(data) + } else if let Some(slice) = self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? + { + let data = S::Value::decode_value(slice.as_ref()) + .map_err(|o| StoreError::DecodeError(o.to_string()))?; + self.cache.insert(key, data.clone()); + Ok(data) + } else { + Err(StoreError::KeyNotFound("".to_string())) + } + } + + pub fn iterator( + &self, + ) -> Result, S::Value), Box>> + '_, StoreError> + { + let db_iterator = self + .db + .raw_iterator_cf_opt( + S::COLUMN_FAMILY, + IteratorMode::Start, + ReadOptions::default(), + ) + .map_err(|e| StoreError::CFNotExist(e.to_string()))?; + + Ok(db_iterator.map(|iter_result| match iter_result { + Ok((key, data_bytes)) => match S::Value::decode_value(&data_bytes) { + Ok(data) => Ok((key, data)), + Err(e) => Err(e.into()), + }, + Err(e) => Err(e.into()), + })) + } + + pub fn write( + &self, + mut writer: impl DbWriter, + key: S::Key, + data: S::Value, + ) -> Result<(), StoreError> { + writer.put::(&key, &data)?; + self.cache.insert(key, data); + Ok(()) + } + + pub fn write_many( + &self, + mut writer: impl DbWriter, + iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> { + for (key, data) in iter { + writer.put::(&key, &data)?; + self.cache.insert(key, data); + } + Ok(()) + } + + /// Write directly from an iterator and do not cache any data. 
NOTE: this action also clears the cache + pub fn write_many_without_cache( + &self, + mut writer: impl DbWriter, + iter: &mut impl Iterator, + ) -> Result<(), StoreError> { + for (key, data) in iter { + writer.put::(&key, &data)?; + } + // The cache must be cleared in order to avoid invalidated entries + self.cache.remove_all(); + Ok(()) + } + + pub fn delete(&self, mut writer: impl DbWriter, key: S::Key) -> Result<(), StoreError> { + self.cache.remove(&key); + writer.delete::(&key)?; + Ok(()) + } + + pub fn delete_many( + &self, + mut writer: impl DbWriter, + key_iter: &mut (impl Iterator + Clone), + ) -> Result<(), StoreError> { + let key_iter_clone = key_iter.clone(); + self.cache.remove_many(key_iter); + for key in key_iter_clone { + writer.delete::(&key)?; + } + Ok(()) + } + + pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> { + self.cache.remove_all(); + let keys = self + .db + .raw_iterator_cf_opt( + S::COLUMN_FAMILY, + IteratorMode::Start, + ReadOptions::default(), + ) + .map_err(|e| StoreError::CFNotExist(e.to_string()))? + .map(|iter_result| match iter_result { + Ok((key, _)) => Ok::<_, rocksdb::Error>(key), + Err(e) => Err(e), + }) + .collect_vec(); + for key in keys { + writer.delete::(&S::Key::decode_key(&key?)?)?; + } + Ok(()) + } + + /// A dynamic iterator that can iterate through a specific prefix, and from a certain start point. + //TODO: loop and chain iterators for multi-prefix iterator. + pub fn seek_iterator( + &self, + seek_from: Option, // iter whole range if None + limit: usize, // amount to take. + skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). + ) -> Result, S::Value), Box>> + '_, StoreError> + { + let read_opts = ReadOptions::default(); + let mut db_iterator = match seek_from { + Some(seek_key) => self.db.raw_iterator_cf_opt( + S::COLUMN_FAMILY, + IteratorMode::From(seek_key.encode_key()?.as_slice(), Direction::Forward), + read_opts, + ), + None => self + .db + .raw_iterator_cf_opt(S::COLUMN_FAMILY, IteratorMode::Start, read_opts), + } + .map_err(|e| StoreError::CFNotExist(e.to_string()))?; + + if skip_first { + db_iterator.next(); + } + + Ok(db_iterator.take(limit).map(move |item| match item { + Ok((key_bytes, value_bytes)) => match S::Value::decode_value(value_bytes.as_ref()) { + Ok(value) => Ok((key_bytes, value)), + Err(err) => Err(err.into()), + }, + Err(err) => Err(err.into()), + })) + } +} diff --git a/consensus/src/consensusdb/cache.rs b/consensus/src/consensusdb/cache.rs new file mode 100644 index 0000000000..e2d5de0c3c --- /dev/null +++ b/consensus/src/consensusdb/cache.rs @@ -0,0 +1,44 @@ +use core::hash::Hash; +use starcoin_storage::cache_storage::GCacheStorage; +use std::sync::Arc; + +#[derive(Clone)] +pub struct DagCache { + cache: Arc>, +} + +impl DagCache +where + K: Hash + Eq + Default, + V: Default + Clone, +{ + pub(crate) fn new_with_capacity(size: u64) -> Self { + Self { + cache: Arc::new(GCacheStorage::new_with_capacity(size as usize, None)), + } + } + + pub(crate) fn get(&self, key: &K) -> Option { + self.cache.get_inner(key) + } + + pub(crate) fn contains_key(&self, key: &K) -> bool { + self.get(key).is_some() + } + + pub(crate) fn insert(&self, key: K, data: V) { + self.cache.put_inner(key, data); + } + + pub(crate) fn remove(&self, key: &K) { + self.cache.remove_inner(key); + } + + pub(crate) fn remove_many(&self, key_iter: &mut impl Iterator) { + key_iter.for_each(|k| self.remove(&k)); + } + + pub(crate) fn remove_all(&self) { + 
self.cache.remove_all(); + } +} diff --git a/consensus/src/consensusdb/consensus_ghostdag.rs b/consensus/src/consensusdb/consensus_ghostdag.rs new file mode 100644 index 0000000000..a6746d9eb5 --- /dev/null +++ b/consensus/src/consensusdb/consensus_ghostdag.rs @@ -0,0 +1,512 @@ +use super::schema::{KeyCodec, ValueCodec}; +use super::{ + db::DBStorage, + error::StoreError, + prelude::{CachedDbAccess, DirectDbWriter}, + writer::BatchDbWriter, +}; +use crate::define_schema; +use starcoin_types::blockhash::{ + BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, +}; + +use crate::dag::types::{ + ghostdata::{CompactGhostdagData, GhostdagData}, + ordering::SortableBlock, +}; +use itertools::{ + EitherOrBoth::{Both, Left, Right}, + Itertools, +}; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use std::{cell::RefCell, cmp, iter::once, sync::Arc}; + +pub trait GhostdagStoreReader { + fn get_blue_score(&self, hash: Hash) -> Result; + fn get_blue_work(&self, hash: Hash) -> Result; + fn get_selected_parent(&self, hash: Hash) -> Result; + fn get_mergeset_blues(&self, hash: Hash) -> Result; + fn get_mergeset_reds(&self, hash: Hash) -> Result; + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result; + + /// Returns full block data for the requested hash + fn get_data(&self, hash: Hash) -> Result, StoreError>; + + fn get_compact_data(&self, hash: Hash) -> Result; + + /// Check if the store contains data for the requested hash + fn has(&self, hash: Hash) -> Result; +} + +pub trait GhostdagStore: GhostdagStoreReader { + /// Insert GHOSTDAG data for block `hash` into the store. Note that GHOSTDAG data + /// is added once and never modified, so no need for specific setters for each element. + /// Additionally, this means writes are semantically "append-only", which is why + /// we can keep the `insert` method non-mutable on self. See "Parallel Processing.md" for an overview. 
+ fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError>; +} + +pub struct GhostDagDataWrapper(GhostdagData); + +impl From for GhostDagDataWrapper { + fn from(value: GhostdagData) -> Self { + Self(value) + } +} + +impl GhostDagDataWrapper { + /// Returns an iterator to the mergeset in ascending blue work order (tie-breaking by hash) + pub fn ascending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (a, b) { + (Ok(a), Ok(b)) => a.cmp(b), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // remove both Err nodes + }, + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in descending blue work order (tie-breaking by hash) + pub fn descending_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.0 + .mergeset_blues + .iter() + .skip(1) // Skip the selected parent + .rev() // Reverse since blues and reds are stored with ascending blue work order + .cloned() + .map(|h| { + store + .get_blue_work(h) + .map(|blue| SortableBlock::new(h, blue)) + }) + .merge_join_by( + self.0 + .mergeset_reds + .iter() + .rev() // Reverse + .cloned() + .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), + |a, b| match (b, a) { + (Ok(b), Ok(a)) => b.cmp(a), + (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node + (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node + (Err(_), Err(_)) => cmp::Ordering::Equal, // select both Err nodes + }, // Reverse + ) + .map(|r| match r { + Left(b) | Right(b) => b, + Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), + }) + } + + /// Returns an iterator to the mergeset in topological consensus order -- starting with the selected parent, + /// and adding the mergeset in increasing blue work order. Note that this is a topological order even though + /// the selected parent has highest blue work by def -- since the mergeset is in its anticone. 
+ pub fn consensus_ordered_mergeset<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + once(Ok(self.0.selected_parent)).chain( + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)), + ) + } + + /// Returns an iterator to the mergeset in topological consensus order without the selected parent + pub fn consensus_ordered_mergeset_without_selected_parent<'a>( + &'a self, + store: &'a (impl GhostdagStoreReader + ?Sized), + ) -> impl Iterator> + '_ { + self.ascending_mergeset_without_selected_parent(store) + .map(|s| s.map(|s| s.hash)) + } +} + +pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; +pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; + +define_schema!(GhostDag, Hash, Arc, GHOST_DAG_STORE_CF); +define_schema!( + CompactGhostDag, + Hash, + CompactGhostdagData, + COMPACT_GHOST_DAG_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactGhostdagData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbGhostdagStore { + db: Arc, + level: BlockLevel, + access: CachedDbAccess, + compact_access: CachedDbAccess, +} + +impl DbGhostdagStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_size), + compact_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + data: &Arc, + ) -> Result<(), StoreError> { + if self.access.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(BatchDbWriter::new(batch), hash, data.clone())?; + self.compact_access.write( + BatchDbWriter::new(batch), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +impl GhostdagStoreReader for DbGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.blue_score) + } + + fn get_blue_work(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.blue_work) + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.selected_parent) + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_blues)) + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.mergeset_reds)) + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.blues_anticone_sizes)) + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + self.access.read(hash) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + self.compact_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } +} + +impl GhostdagStore for DbGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.access + .write(DirectDbWriter::new(&self.db), hash, data.clone())?; + if self.compact_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactGhostdagData { + blue_score: data.blue_score, + blue_work: data.blue_work, + selected_parent: data.selected_parent, + }, + )?; + Ok(()) + } +} + +/// An in-memory implementation of `GhostdagStore` trait to be used for tests. +/// Uses `RefCell` for interior mutability in order to workaround `insert` +/// being non-mutable. +pub struct MemoryGhostdagStore { + blue_score_map: RefCell>, + blue_work_map: RefCell>, + selected_parent_map: RefCell>, + mergeset_blues_map: RefCell>, + mergeset_reds_map: RefCell>, + blues_anticone_sizes_map: RefCell>, +} + +impl MemoryGhostdagStore { + pub fn new() -> Self { + Self { + blue_score_map: RefCell::new(BlockHashMap::new()), + blue_work_map: RefCell::new(BlockHashMap::new()), + selected_parent_map: RefCell::new(BlockHashMap::new()), + mergeset_blues_map: RefCell::new(BlockHashMap::new()), + mergeset_reds_map: RefCell::new(BlockHashMap::new()), + blues_anticone_sizes_map: RefCell::new(BlockHashMap::new()), + } + } +} + +impl Default for MemoryGhostdagStore { + fn default() -> Self { + Self::new() + } +} + +impl GhostdagStore for MemoryGhostdagStore { + fn insert(&self, hash: Hash, data: Arc) -> Result<(), StoreError> { + if self.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.blue_score_map + .borrow_mut() + .insert(hash, data.blue_score); + self.blue_work_map.borrow_mut().insert(hash, data.blue_work); + self.selected_parent_map + .borrow_mut() + .insert(hash, data.selected_parent); + self.mergeset_blues_map + .borrow_mut() + .insert(hash, data.mergeset_blues.clone()); + self.mergeset_reds_map + .borrow_mut() + .insert(hash, data.mergeset_reds.clone()); + self.blues_anticone_sizes_map + .borrow_mut() + .insert(hash, data.blues_anticone_sizes.clone()); + Ok(()) + } +} + +impl GhostdagStoreReader for MemoryGhostdagStore { + fn get_blue_score(&self, hash: Hash) -> Result { + match self.blue_score_map.borrow().get(&hash) { + Some(blue_score) => Ok(*blue_score), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blue_work(&self, hash: Hash) -> Result { + match self.blue_work_map.borrow().get(&hash) { + Some(blue_work) => Ok(*blue_work), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_selected_parent(&self, hash: Hash) -> Result { + match self.selected_parent_map.borrow().get(&hash) { + Some(selected_parent) => Ok(*selected_parent), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_blues(&self, hash: Hash) -> Result { + match self.mergeset_blues_map.borrow().get(&hash) { + Some(mergeset_blues) => Ok(BlockHashes::clone(mergeset_blues)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_mergeset_reds(&self, hash: Hash) -> Result { + match self.mergeset_reds_map.borrow().get(&hash) { + Some(mergeset_reds) => Ok(BlockHashes::clone(mergeset_reds)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_blues_anticone_sizes(&self, hash: Hash) -> Result { + match self.blues_anticone_sizes_map.borrow().get(&hash) { + Some(sizes) => Ok(HashKTypeMap::clone(sizes)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result, StoreError> { + if !self.has(hash)? 
{ + return Err(StoreError::KeyNotFound(hash.to_string())); + } + Ok(Arc::new(GhostdagData::new( + self.blue_score_map.borrow()[&hash], + self.blue_work_map.borrow()[&hash], + self.selected_parent_map.borrow()[&hash], + self.mergeset_blues_map.borrow()[&hash].clone(), + self.mergeset_reds_map.borrow()[&hash].clone(), + self.blues_anticone_sizes_map.borrow()[&hash].clone(), + ))) + } + + fn get_compact_data(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.to_compact()) + } + + fn has(&self, hash: Hash) -> Result { + Ok(self.blue_score_map.borrow().contains_key(&hash)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use starcoin_types::blockhash::BlockHashSet; + use std::iter::once; + + #[test] + fn test_mergeset_iterators() { + let store = MemoryGhostdagStore::new(); + + let factory = |w: u64| { + Arc::new(GhostdagData { + blue_score: Default::default(), + blue_work: w.into(), + selected_parent: Default::default(), + mergeset_blues: Default::default(), + mergeset_reds: Default::default(), + blues_anticone_sizes: Default::default(), + }) + }; + + // Blues + store.insert(1.into(), factory(2)).unwrap(); + store.insert(2.into(), factory(7)).unwrap(); + store.insert(3.into(), factory(11)).unwrap(); + + // Reds + store.insert(4.into(), factory(4)).unwrap(); + store.insert(5.into(), factory(9)).unwrap(); + store.insert(6.into(), factory(11)).unwrap(); // Tie-breaking case + + let mut data = GhostdagData::new_with_selected_parent(1.into(), 5); + data.add_blue(2.into(), Default::default(), &Default::default()); + data.add_blue(3.into(), Default::default(), &Default::default()); + + data.add_red(4.into()); + data.add_red(5.into()); + data.add_red(6.into()); + + let wrapper: GhostDagDataWrapper = data.clone().into(); + + let mut expected: Vec = vec![4.into(), 2.into(), 5.into(), 3.into(), 6.into()]; + assert_eq!( + expected, + wrapper + .ascending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + itertools::assert_equal( + once(1.into()).chain(expected.iter().cloned()), + wrapper + .consensus_ordered_mergeset(&store) + .filter_map(|b| b.ok()), + ); + + expected.reverse(); + assert_eq!( + expected, + wrapper + .descending_mergeset_without_selected_parent(&store) + .filter_map(|b| b.map(|b| b.hash).ok()) + .collect::>() + ); + + // Use sets since the below functions have no order guarantee + let expected = BlockHashSet::from_iter([4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset_without_selected_parent() + .collect::() + ); + + let expected = + BlockHashSet::from_iter([1.into(), 4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); + assert_eq!( + expected, + data.unordered_mergeset().collect::() + ); + } +} diff --git a/consensus/src/consensusdb/consensus_header.rs b/consensus/src/consensusdb/consensus_header.rs new file mode 100644 index 0000000000..6a9c06fbdc --- /dev/null +++ b/consensus/src/consensusdb/consensus_header.rs @@ -0,0 +1,216 @@ +use super::schema::{KeyCodec, ValueCodec}; +use super::{ + db::DBStorage, + error::{StoreError, StoreResult}, + prelude::CachedDbAccess, + writer::{BatchDbWriter, DirectDbWriter}, +}; +use crate::define_schema; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::{ + blockhash::BlockLevel, + header::{CompactHeaderData, ConsensusHeader, DagHeader, HeaderWithBlockLevel}, + U256, +}; +use std::sync::Arc; + +pub trait HeaderStoreReader { + fn get_daa_score(&self, hash: Hash) -> Result; + fn 
get_blue_score(&self, hash: Hash) -> Result; + fn get_timestamp(&self, hash: Hash) -> Result; + fn get_difficulty(&self, hash: Hash) -> Result; + fn get_header(&self, hash: Hash) -> Result, StoreError>; + fn get_header_with_block_level(&self, hash: Hash) -> Result; + fn get_compact_header_data(&self, hash: Hash) -> Result; +} + +pub trait HeaderStore: HeaderStoreReader { + // This is append only + fn insert( + &self, + hash: Hash, + header: Arc, + block_level: BlockLevel, + ) -> Result<(), StoreError>; +} + +pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; +pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; + +define_schema!(BlockHeader, Hash, HeaderWithBlockLevel, HEADERS_STORE_CF); +define_schema!( + CompactBlockHeader, + Hash, + CompactHeaderData, + COMPACT_HEADER_DATA_STORE_CF +); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for HeaderWithBlockLevel { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for CompactHeaderData { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `HeaderStore` trait, with concurrency support. +#[derive(Clone)] +pub struct DbHeadersStore { + db: Arc, + headers_access: CachedDbAccess, + compact_headers_access: CachedDbAccess, +} + +impl DbHeadersStore { + pub fn new(db: Arc, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + headers_access: CachedDbAccess::new(db.clone(), cache_size), + compact_headers_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), cache_size) + } + + pub fn has(&self, hash: Hash) -> StoreResult { + self.headers_access.has(hash) + } + + pub fn get_header(&self, hash: Hash) -> Result { + let result = self.headers_access.read(hash)?; + Ok((*result.header).clone()) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + header: Arc, + block_level: BlockLevel, + ) -> Result<(), StoreError> { + if self.headers_access.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.headers_access.write( + BatchDbWriter::new(batch), + hash, + HeaderWithBlockLevel { + header: header.clone(), + block_level, + }, + )?; + self.compact_headers_access.write( + BatchDbWriter::new(batch), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + Ok(()) + } +} + +impl HeaderStoreReader for DbHeadersStore { + fn get_daa_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_blue_score(&self, _hash: Hash) -> Result { + unimplemented!() + } + + fn get_timestamp(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(header_with_block_level.header.timestamp()); + } + Ok(self.compact_headers_access.read(hash)?.timestamp) + } + + fn get_difficulty(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(header_with_block_level.header.difficulty()); + } + Ok(self.compact_headers_access.read(hash)?.difficulty) + } + + fn get_header(&self, hash: Hash) -> Result, StoreError> { + Ok(self.headers_access.read(hash)?.header) + } + + fn get_header_with_block_level(&self, hash: Hash) -> Result { + self.headers_access.read(hash) + } + + fn get_compact_header_data(&self, hash: Hash) -> Result { + if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { + return Ok(CompactHeaderData { + timestamp: header_with_block_level.header.timestamp(), + difficulty: header_with_block_level.header.difficulty(), + }); + } + self.compact_headers_access.read(hash) + } +} + +impl HeaderStore for DbHeadersStore { + fn insert( + &self, + hash: Hash, + header: Arc, + block_level: u8, + ) -> Result<(), StoreError> { + if self.headers_access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + self.compact_headers_access.write( + DirectDbWriter::new(&self.db), + hash, + CompactHeaderData { + timestamp: header.timestamp(), + difficulty: header.difficulty(), + }, + )?; + self.headers_access.write( + DirectDbWriter::new(&self.db), + hash, + HeaderWithBlockLevel { + header, + block_level, + }, + )?; + Ok(()) + } +} diff --git a/consensus/src/consensusdb/consensus_reachability.rs b/consensus/src/consensusdb/consensus_reachability.rs new file mode 100644 index 0000000000..308ffb88a8 --- /dev/null +++ b/consensus/src/consensusdb/consensus_reachability.rs @@ -0,0 +1,540 @@ +use super::{ + db::DBStorage, + prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, +}; +use starcoin_crypto::HashValue as Hash; +use starcoin_storage::storage::RawDBStorage; + +use crate::{ + dag::types::{interval::Interval, reachability::ReachabilityData}, + define_schema, + schema::{KeyCodec, ValueCodec}, +}; +use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; + +use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; +use rocksdb::WriteBatch; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; + +/// Reader API for `ReachabilityStore`. +pub trait ReachabilityStoreReader { + fn has(&self, hash: Hash) -> Result; + fn get_interval(&self, hash: Hash) -> Result; + fn get_parent(&self, hash: Hash) -> Result; + fn get_children(&self, hash: Hash) -> Result; + fn get_future_covering_set(&self, hash: Hash) -> Result; +} + +/// Write API for `ReachabilityStore`. 
All write functions are deliberately `mut` +/// since reachability writes are not append-only and thus need to be guarded. +pub trait ReachabilityStore: ReachabilityStoreReader { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError>; + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError>; + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError>; + fn append_child(&mut self, hash: Hash, child: Hash) -> Result; + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError>; + fn get_height(&self, hash: Hash) -> Result; + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError>; + fn get_reindex_root(&self) -> Result; +} + +const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; +pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; +// TODO: explore perf to see if using fixed-length constants for store prefixes is preferable + +define_schema!( + Reachability, + Hash, + Arc, + REACHABILITY_DATA_CF +); +define_schema!(ReachabilityCache, Vec, Hash, REACHABILITY_DATA_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Vec { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Ok(data.to_vec()) + } +} +impl ValueCodec for Hash { + fn encode_value(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_value(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. 
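// --- Editor's note: illustrative sketch, not part of the diff. ---
// `StagingReachabilityStore` (defined further below) buffers reachability
// mutations against an upgradable read lock and only upgrades to a write lock
// inside `commit`, so concurrent readers are never blocked while a batch is
// being prepared. Hedged usage; the lock setup and hash values are assumptions:
//
//     let store = parking_lot::RwLock::new(DbReachabilityStore::new(db.clone(), 1024));
//     let mut staging = StagingReachabilityStore::new(store.upgradable_read());
//     staging.insert(7.into(), 15.into(), Interval::maximal(), 5)?;
//     let mut batch = rocksdb::WriteBatch::default();
//     let _write_guard = staging.commit(&mut batch)?; // read lock upgraded here
//     db.raw_write_batch(batch)?; // staged writes land atomically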
+#[derive(Clone)] +pub struct DbReachabilityStore { + db: Arc, + access: CachedDbAccess, + reindex_root: CachedDbItem, +} + +impl DbReachabilityStore { + pub fn new(db: Arc, cache_size: u64) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + pub fn new_with_alternative_prefix_end(db: Arc, cache_size: u64) -> Self { + Self::new_with_prefix_end(db, cache_size) + } + + fn new_with_prefix_end(db: Arc, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + access: CachedDbAccess::new(Arc::clone(&db), cache_size), + reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY.as_bytes().to_vec()), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new_with_prefix_end(Arc::clone(&self.db), cache_size) + } +} + +impl ReachabilityStore for DbReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + debug_assert!(!self.access.has(origin)?); + + let data = Arc::new(ReachabilityData::new( + Hash::new(blockhash::NONE), + capacity, + 0, + )); + let mut batch = WriteBatch::default(); + self.access + .write(BatchDbWriter::new(&mut batch), origin, data)?; + self.reindex_root + .write(BatchDbWriter::new(&mut batch), &origin)?; + self.db + .raw_write_batch(batch) + .map_err(|e| StoreError::DBIoError(e.to_string()))?; + + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.access.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + let data = Arc::new(ReachabilityData::new(parent, interval, height)); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + Arc::make_mut(&mut data).interval = interval; + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let mut data = self.access.read(hash)?; + let height = data.height; + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.children).push(child); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let mut data = self.access.read(hash)?; + let mut_data = Arc::make_mut(&mut data); + Arc::make_mut(&mut mut_data.future_covering_set).insert(insertion_index, fci); + self.access + .write(DirectDbWriter::new(&self.db), hash, data)?; + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root + .write(DirectDbWriter::new(&self.db), &root) + } + + fn get_reindex_root(&self) -> Result { + self.reindex_root.read() + } +} + +impl ReachabilityStoreReader for DbReachabilityStore { + fn has(&self, hash: Hash) -> Result { + self.access.has(hash) + } + + fn get_interval(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.interval) + } + + fn get_parent(&self, hash: Hash) -> Result { + Ok(self.access.read(hash)?.parent) + } + + fn get_children(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.children)) + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.access.read(hash)?.future_covering_set)) + } +} + +pub struct 
StagingReachabilityStore<'a> { + store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>, + staging_writes: BlockHashMap, + staging_reindex_root: Option, +} + +impl<'a> StagingReachabilityStore<'a> { + pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>) -> Self { + Self { + store_read, + staging_writes: BlockHashMap::new(), + staging_reindex_root: None, + } + } + + pub fn commit( + self, + batch: &mut WriteBatch, + ) -> Result, StoreError> { + let mut store_write = RwLockUpgradableReadGuard::upgrade(self.store_read); + for (k, v) in self.staging_writes { + let data = Arc::new(v); + store_write + .access + .write(BatchDbWriter::new(batch), k, data)? + } + if let Some(root) = self.staging_reindex_root { + store_write + .reindex_root + .write(BatchDbWriter::new(batch), &root)?; + } + Ok(store_write) + } +} + +impl ReachabilityStore for StagingReachabilityStore<'_> { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if self.store_read.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + if let Vacant(e) = self.staging_writes.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + data.interval = interval; + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + data.interval = interval; + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + if let Some(data) = self.staging_writes.get_mut(&hash) { + Arc::make_mut(&mut data.children).push(child); + return Ok(data.height); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + let height = data.height; + Arc::make_mut(&mut data.children).push(child); + self.staging_writes.insert(hash, data); + + Ok(height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + if let Some(data) = self.staging_writes.get_mut(&hash) { + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + return Ok(()); + } + + let mut data = (*self.store_read.access.read(hash)?).clone(); + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + self.staging_writes.insert(hash, data); + + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.height) + } else { + Ok(self.store_read.access.read(hash)?.height) + } + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.staging_reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + if let Some(root) = self.staging_reindex_root { + Ok(root) + } else { + Ok(self.store_read.get_reindex_root()?) + } + } +} + +impl ReachabilityStoreReader for StagingReachabilityStore<'_> { + fn has(&self, hash: Hash) -> Result { + Ok(self.staging_writes.contains_key(&hash) || self.store_read.access.has(hash)?) 
+ } + + fn get_interval(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.interval) + } else { + Ok(self.store_read.access.read(hash)?.interval) + } + } + + fn get_parent(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(data.parent) + } else { + Ok(self.store_read.access.read(hash)?.parent) + } + } + + fn get_children(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.children)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.children, + )) + } + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + if let Some(data) = self.staging_writes.get(&hash) { + Ok(BlockHashes::clone(&data.future_covering_set)) + } else { + Ok(BlockHashes::clone( + &self.store_read.access.read(hash)?.future_covering_set, + )) + } + } +} + +pub struct MemoryReachabilityStore { + map: BlockHashMap, + reindex_root: Option, +} + +impl Default for MemoryReachabilityStore { + fn default() -> Self { + Self::new() + } +} + +impl MemoryReachabilityStore { + pub fn new() -> Self { + Self { + map: BlockHashMap::new(), + reindex_root: None, + } + } + + fn get_data_mut(&mut self, hash: Hash) -> Result<&mut ReachabilityData, StoreError> { + match self.map.get_mut(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_data(&self, hash: Hash) -> Result<&ReachabilityData, StoreError> { + match self.map.get(&hash) { + Some(data) => Ok(data), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } +} + +impl ReachabilityStore for MemoryReachabilityStore { + fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { + self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; + self.set_reindex_root(origin)?; + Ok(()) + } + + fn insert( + &mut self, + hash: Hash, + parent: Hash, + interval: Interval, + height: u64, + ) -> Result<(), StoreError> { + if let Vacant(e) = self.map.entry(hash) { + e.insert(ReachabilityData::new(parent, interval, height)); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } + + fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + data.interval = interval; + Ok(()) + } + + fn append_child(&mut self, hash: Hash, child: Hash) -> Result { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.children).push(child); + Ok(data.height) + } + + fn insert_future_covering_item( + &mut self, + hash: Hash, + fci: Hash, + insertion_index: usize, + ) -> Result<(), StoreError> { + let data = self.get_data_mut(hash)?; + Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); + Ok(()) + } + + fn get_height(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.height) + } + + fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { + self.reindex_root = Some(root); + Ok(()) + } + + fn get_reindex_root(&self) -> Result { + match self.reindex_root { + Some(root) => Ok(root), + None => Err(StoreError::KeyNotFound(REINDEX_ROOT_KEY.to_string())), + } + } +} + +impl ReachabilityStoreReader for MemoryReachabilityStore { + fn has(&self, hash: Hash) -> Result { + Ok(self.map.contains_key(&hash)) + } + + fn get_interval(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.interval) + } + + fn get_parent(&self, hash: Hash) -> Result { + Ok(self.get_data(hash)?.parent) + } + + fn get_children(&self, hash: 
Hash) -> Result { + Ok(Arc::clone(&self.get_data(hash)?.children)) + } + + fn get_future_covering_set(&self, hash: Hash) -> Result { + Ok(Arc::clone(&self.get_data(hash)?.future_covering_set)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_store_basics() { + let mut store: Box = Box::new(MemoryReachabilityStore::new()); + let (hash, parent) = (7.into(), 15.into()); + let interval = Interval::maximal(); + store.insert(hash, parent, interval, 5).unwrap(); + let height = store.append_child(hash, 31.into()).unwrap(); + assert_eq!(height, 5); + let children = store.get_children(hash).unwrap(); + println!("{children:?}"); + store.get_interval(7.into()).unwrap(); + println!("{children:?}"); + } +} diff --git a/consensus/src/consensusdb/consensus_relations.rs b/consensus/src/consensusdb/consensus_relations.rs new file mode 100644 index 0000000000..a34c1c049c --- /dev/null +++ b/consensus/src/consensusdb/consensus_relations.rs @@ -0,0 +1,316 @@ +use super::schema::{KeyCodec, ValueCodec}; +use super::{ + db::DBStorage, + prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, +}; +use crate::define_schema; +use rocksdb::WriteBatch; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlockLevel}; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; + +/// Reader API for `RelationsStore`. +pub trait RelationsStoreReader { + fn get_parents(&self, hash: Hash) -> Result; + fn get_children(&self, hash: Hash) -> Result; + fn has(&self, hash: Hash) -> Result; +} + +/// Write API for `RelationsStore`. The insert function is deliberately `mut` +/// since it modifies the children arrays for previously added parents which is +/// non-append-only and thus needs to be guarded. +pub trait RelationsStore: RelationsStoreReader { + /// Inserts `parents` into a new store entry for `hash`, and for each `parent ∈ parents` adds `hash` to `parent.children` + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>; +} + +pub(crate) const PARENTS_CF: &str = "block-parents"; +pub(crate) const CHILDREN_CF: &str = "block-children"; + +define_schema!(RelationParent, Hash, Arc>, PARENTS_CF); +define_schema!(RelationChildren, Hash, Arc>, CHILDREN_CF); + +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} +impl KeyCodec for Hash { + fn encode_key(&self) -> Result, StoreError> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +impl ValueCodec for Arc> { + fn encode_value(&self) -> Result, StoreError> { + bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) + } + + fn decode_value(data: &[u8]) -> Result { + bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) + } +} + +/// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. 
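// --- Editor's note: illustrative sketch, not part of the diff. ---
// `RelationsStore::insert` (below) records the parent list for a new hash and
// appends the hash to each parent's children list; that read-modify-write on
// the children arrays is why it takes `&mut self`, unlike the append-only
// ghostdag and header stores. Hedged usage with placeholder hashes:
//
//     let mut relations = DbRelationsStore::new(db, 0 /* block level */, 1024);
//     relations.insert(4.into(), BlockHashes::new(vec![2.into(), 3.into()]))?;
//     assert!(relations.get_children(2.into())?.contains(&4.into()));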
+#[derive(Clone)] +pub struct DbRelationsStore { + db: Arc, + level: BlockLevel, + parents_access: CachedDbAccess, + children_access: CachedDbAccess, +} + +impl DbRelationsStore { + pub fn new(db: Arc, level: BlockLevel, cache_size: u64) -> Self { + Self { + db: Arc::clone(&db), + level, + parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size), + children_access: CachedDbAccess::new(db, cache_size), + } + } + + pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { + Self::new(Arc::clone(&self.db), self.level, cache_size) + } + + pub fn insert_batch( + &mut self, + batch: &mut WriteBatch, + hash: Hash, + parents: BlockHashes, + ) -> Result<(), StoreError> { + if self.has(hash)? { + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(BatchDbWriter::new(batch), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + BatchDbWriter::new(batch), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + BatchDbWriter::new(batch), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +impl RelationsStoreReader for DbRelationsStore { + fn get_parents(&self, hash: Hash) -> Result { + self.parents_access.read(hash) + } + + fn get_children(&self, hash: Hash) -> Result { + self.children_access.read(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.parents_access.has(hash)? { + debug_assert!(self.children_access.has(hash)?); + Ok(true) + } else { + Ok(false) + } + } +} + +impl RelationsStore for DbRelationsStore { + /// See `insert_batch` as well + /// TODO: use one function with DbWriter for both this function and insert_batch + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + if self.has(hash)? 
{ + return Err(StoreError::KeyAlreadyExists(hash.to_string())); + } + + // Insert a new entry for `hash` + self.parents_access + .write(DirectDbWriter::new(&self.db), hash, parents.clone())?; + + // The new hash has no children yet + self.children_access.write( + DirectDbWriter::new(&self.db), + hash, + BlockHashes::new(Vec::new()), + )?; + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_access.write( + DirectDbWriter::new(&self.db), + parent, + BlockHashes::new(children), + )?; + } + + Ok(()) + } +} + +pub struct MemoryRelationsStore { + parents_map: BlockHashMap, + children_map: BlockHashMap, +} + +impl MemoryRelationsStore { + pub fn new() -> Self { + Self { + parents_map: BlockHashMap::new(), + children_map: BlockHashMap::new(), + } + } +} + +impl Default for MemoryRelationsStore { + fn default() -> Self { + Self::new() + } +} + +impl RelationsStoreReader for MemoryRelationsStore { + fn get_parents(&self, hash: Hash) -> Result { + match self.parents_map.get(&hash) { + Some(parents) => Ok(BlockHashes::clone(parents)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn get_children(&self, hash: Hash) -> Result { + match self.children_map.get(&hash) { + Some(children) => Ok(BlockHashes::clone(children)), + None => Err(StoreError::KeyNotFound(hash.to_string())), + } + } + + fn has(&self, hash: Hash) -> Result { + Ok(self.parents_map.contains_key(&hash)) + } +} + +impl RelationsStore for MemoryRelationsStore { + fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + if let Vacant(e) = self.parents_map.entry(hash) { + // Update the new entry for `hash` + e.insert(BlockHashes::clone(&parents)); + + // Update `children` for each parent + for parent in parents.iter().cloned() { + let mut children = (*self.get_children(parent)?).clone(); + children.push(hash); + self.children_map.insert(parent, BlockHashes::new(children)); + } + + // The new hash has no children yet + self.children_map.insert(hash, BlockHashes::new(Vec::new())); + Ok(()) + } else { + Err(StoreError::KeyAlreadyExists(hash.to_string())) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::consensusdb::{ + db::RelationsStoreConfig, + prelude::{FlexiDagStorage, FlexiDagStorageConfig}, + }; + + #[test] + fn test_memory_relations_store() { + test_relations_store(MemoryRelationsStore::new()); + } + + #[test] + fn test_db_relations_store() { + let db_tempdir = tempfile::tempdir().unwrap(); + let rs_conf = RelationsStoreConfig { + block_level: 0, + cache_size: 2, + }; + let config = FlexiDagStorageConfig::new() + .update_parallelism(1) + .update_relations_conf(rs_conf); + + let db = FlexiDagStorage::create_from_path(db_tempdir.path(), config) + .expect("failed to create flexidag storage"); + test_relations_store(db.relations_store); + } + + fn test_relations_store(mut store: T) { + let parents = [ + (1, vec![]), + (2, vec![1]), + (3, vec![1]), + (4, vec![2, 3]), + (5, vec![1, 4]), + ]; + for (i, vec) in parents.iter().cloned() { + store + .insert( + i.into(), + BlockHashes::new(vec.iter().copied().map(Hash::from).collect()), + ) + .unwrap(); + } + + let expected_children = [ + (1, vec![2, 3, 5]), + (2, vec![4]), + (3, vec![4]), + (4, vec![5]), + (5, vec![]), + ]; + for (i, vec) in expected_children { + assert!(store + .get_children(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + + for (i, vec) 
in parents { + assert!(store + .get_parents(i.into()) + .unwrap() + .iter() + .copied() + .eq(vec.iter().copied().map(Hash::from))); + } + } +} diff --git a/consensus/src/consensusdb/db.rs b/consensus/src/consensusdb/db.rs new file mode 100644 index 0000000000..331df80277 --- /dev/null +++ b/consensus/src/consensusdb/db.rs @@ -0,0 +1,149 @@ +use super::{ + error::StoreError, + schemadb::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, + COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, + HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, + }, +}; +use starcoin_config::RocksdbConfig; +pub(crate) use starcoin_storage::db_storage::DBStorage; +use std::{path::Path, sync::Arc}; + +#[derive(Clone)] +pub struct FlexiDagStorage { + pub ghost_dag_store: DbGhostdagStore, + pub header_store: DbHeadersStore, + pub reachability_store: DbReachabilityStore, + pub relations_store: DbRelationsStore, +} + +#[derive(Clone, Default)] +pub struct GhostDagStoreConfig { + pub block_level: u8, + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct HeaderStoreConfig { + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct ReachabilityStoreConfig { + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct RelationsStoreConfig { + pub block_level: u8, + pub cache_size: u64, +} + +#[derive(Clone, Default)] +pub struct FlexiDagStorageConfig { + pub parallelism: u64, + pub gds_conf: GhostDagStoreConfig, + pub hs_conf: HeaderStoreConfig, + pub rbs_conf: ReachabilityStoreConfig, + pub rs_conf: RelationsStoreConfig, +} + +impl FlexiDagStorageConfig { + pub fn new() -> Self { + FlexiDagStorageConfig::default() + } + + pub fn create_with_params(parallelism: u64, block_level: u8, cache_size: u64) -> Self { + Self { + parallelism, + gds_conf: GhostDagStoreConfig { + block_level, + cache_size, + }, + hs_conf: HeaderStoreConfig { cache_size }, + rbs_conf: ReachabilityStoreConfig { cache_size }, + rs_conf: RelationsStoreConfig { + block_level, + cache_size, + }, + } + } + + pub fn update_parallelism(mut self, parallelism: u64) -> Self { + self.parallelism = parallelism; + self + } + + pub fn update_ghost_dag_conf(mut self, gds_conf: GhostDagStoreConfig) -> Self { + self.gds_conf = gds_conf; + self + } + + pub fn update_headers_conf(mut self, hs_conf: HeaderStoreConfig) -> Self { + self.hs_conf = hs_conf; + self + } + + pub fn update_reachability_conf(mut self, rbs_conf: ReachabilityStoreConfig) -> Self { + self.rbs_conf = rbs_conf; + self + } + + pub fn update_relations_conf(mut self, rs_conf: RelationsStoreConfig) -> Self { + self.rs_conf = rs_conf; + self + } +} + +impl FlexiDagStorage { + /// Creates or loads an existing storage from the provided directory path. 
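// ---- editor's note (not part of the patch) ----
// How the builder-style `update_*` methods above compose; the values here
// are illustrative only:
//
//     let config = FlexiDagStorageConfig::new()
//         .update_parallelism(4)
//         .update_ghost_dag_conf(GhostDagStoreConfig { block_level: 0, cache_size: 1024 })
//         .update_relations_conf(RelationsStoreConfig { block_level: 0, cache_size: 1024 });
//
// `create_with_params(4, 0, 1024)` is the compact form, which additionally
// applies the same cache size to the header and reachability stores.
// ------------------------------------------------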
+ pub fn create_from_path>( + db_path: P, + config: FlexiDagStorageConfig, + ) -> Result { + let rocksdb_config = RocksdbConfig { + parallelism: config.parallelism, + ..Default::default() + }; + + let db = Arc::new( + DBStorage::open_with_cfs( + db_path, + vec![ + // consensus headers + HEADERS_STORE_CF, + COMPACT_HEADER_DATA_STORE_CF, + // consensus relations + PARENTS_CF, + CHILDREN_CF, + // consensus reachability + REACHABILITY_DATA_CF, + // consensus ghostdag + GHOST_DAG_STORE_CF, + COMPACT_GHOST_DAG_STORE_CF, + ], + false, + rocksdb_config, + None, + ) + .map_err(|e| StoreError::DBIoError(e.to_string()))?, + ); + + Ok(Self { + ghost_dag_store: DbGhostdagStore::new( + db.clone(), + config.gds_conf.block_level, + config.gds_conf.cache_size, + ), + + header_store: DbHeadersStore::new(db.clone(), config.hs_conf.cache_size), + reachability_store: DbReachabilityStore::new(db.clone(), config.rbs_conf.cache_size), + relations_store: DbRelationsStore::new( + db, + config.rs_conf.block_level, + config.rs_conf.cache_size, + ), + }) + } +} diff --git a/consensus/src/consensusdb/error.rs b/consensus/src/consensusdb/error.rs new file mode 100644 index 0000000000..ff2c199c93 --- /dev/null +++ b/consensus/src/consensusdb/error.rs @@ -0,0 +1,58 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum StoreError { + #[error("key {0} not found in store")] + KeyNotFound(String), + + #[error("key {0} already exists in store")] + KeyAlreadyExists(String), + + #[error("column family {0} not exist in db")] + CFNotExist(String), + + #[error("IO error {0}")] + DBIoError(String), + + #[error("rocksdb error {0}")] + DbError(#[from] rocksdb::Error), + + #[error("encode error {0}")] + EncodeError(String), + + #[error("decode error {0}")] + DecodeError(String), + + #[error("ghostdag {0} duplicate blocks")] + DAGDupBlocksError(String), +} + +pub type StoreResult = std::result::Result; + +pub trait StoreResultExtensions { + fn unwrap_option(self) -> Option; +} + +impl StoreResultExtensions for StoreResult { + fn unwrap_option(self) -> Option { + match self { + Ok(value) => Some(value), + Err(StoreError::KeyNotFound(_)) => None, + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} + +pub trait StoreResultEmptyTuple { + fn unwrap_and_ignore_key_already_exists(self); +} + +impl StoreResultEmptyTuple for StoreResult<()> { + fn unwrap_and_ignore_key_already_exists(self) { + match self { + Ok(_) => (), + Err(StoreError::KeyAlreadyExists(_)) => (), + Err(err) => panic!("Unexpected store error: {err:?}"), + } + } +} diff --git a/consensus/src/consensusdb/item.rs b/consensus/src/consensusdb/item.rs new file mode 100644 index 0000000000..0d27b9c347 --- /dev/null +++ b/consensus/src/consensusdb/item.rs @@ -0,0 +1,81 @@ +use super::prelude::DbWriter; +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; +use parking_lot::RwLock; +use starcoin_storage::storage::RawDBStorage; +use std::sync::Arc; + +/// A cached DB item with concurrency support +#[derive(Clone)] +pub struct CachedDbItem { + db: Arc, + key: S::Key, + cached_item: Arc>>, +} + +impl CachedDbItem { + pub fn new(db: Arc, key: S::Key) -> Self { + Self { + db, + key, + cached_item: Arc::new(RwLock::new(None)), + } + } + + pub fn read(&self) -> Result { + if let Some(item) = self.cached_item.read().clone() { + return Ok(item); + } + if let Some(slice) = self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? 
+ { + let item = S::Value::decode_value(&slice)?; + *self.cached_item.write() = Some(item.clone()); + Ok(item) + } else { + Err(StoreError::KeyNotFound( + String::from_utf8(self.key.encode_key()?) + .unwrap_or(("unrecoverable key string").to_string()), + )) + } + } + + pub fn write(&mut self, mut writer: impl DbWriter, item: &S::Value) -> Result<(), StoreError> { + *self.cached_item.write() = Some(item.clone()); + writer.put::(&self.key, item)?; + Ok(()) + } + + pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> +where { + *self.cached_item.write() = None; + writer.delete::(&self.key)?; + Ok(()) + } + + pub fn update(&mut self, mut writer: impl DbWriter, op: F) -> Result + where + F: Fn(S::Value) -> S::Value, + { + let mut guard = self.cached_item.write(); + let mut item = if let Some(item) = guard.take() { + item + } else if let Some(slice) = self + .db + .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) + .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? + { + let item = S::Value::decode_value(&slice)?; + item + } else { + return Err(StoreError::KeyNotFound("".to_string())); + }; + + item = op(item); // Apply the update op + *guard = Some(item.clone()); + writer.put::(&self.key, &item)?; + Ok(item) + } +} diff --git a/consensus/src/consensusdb/mod.rs b/consensus/src/consensusdb/mod.rs new file mode 100644 index 0000000000..5aaa7c6ef2 --- /dev/null +++ b/consensus/src/consensusdb/mod.rs @@ -0,0 +1,31 @@ +mod access; +mod cache; +mod consensus_ghostdag; +mod consensus_header; +mod consensus_reachability; +pub mod consensus_relations; +mod db; +mod error; +mod item; +pub mod schema; +mod writer; + +pub mod prelude { + use super::{db, error}; + + pub use super::{ + access::CachedDbAccess, + cache::DagCache, + item::CachedDbItem, + writer::{BatchDbWriter, DbWriter, DirectDbWriter}, + }; + pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; + pub use error::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; +} + +pub mod schemadb { + pub use super::{ + consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, + consensus_relations::*, + }; +} diff --git a/consensus/src/consensusdb/schema.rs b/consensus/src/consensusdb/schema.rs new file mode 100644 index 0000000000..ad1bbc072f --- /dev/null +++ b/consensus/src/consensusdb/schema.rs @@ -0,0 +1,40 @@ +use super::error::StoreError; +use core::hash::Hash; +use std::fmt::Debug; +use std::result::Result; + +pub trait KeyCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. + fn encode_key(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_key(data: &[u8]) -> Result; +} + +pub trait ValueCodec: Clone + Sized + Debug + Send + Sync { + /// Converts `self` to bytes to be stored in DB. + fn encode_value(&self) -> Result, StoreError>; + /// Converts bytes fetched from DB to `Self`. + fn decode_value(data: &[u8]) -> Result; +} + +pub trait Schema: Debug + Send + Sync + 'static { + const COLUMN_FAMILY: &'static str; + + type Key: KeyCodec + Hash + Eq + Default; + type Value: ValueCodec + Default + Clone; +} + +#[macro_export] +macro_rules! 
define_schema { + ($schema_type: ident, $key_type: ty, $value_type: ty, $cf_name: expr) => { + #[derive(Clone, Debug)] + pub(crate) struct $schema_type; + + impl $crate::schema::Schema for $schema_type { + type Key = $key_type; + type Value = $value_type; + + const COLUMN_FAMILY: &'static str = $cf_name; + } + }; +} diff --git a/consensus/src/consensusdb/writer.rs b/consensus/src/consensusdb/writer.rs new file mode 100644 index 0000000000..717d7d7e1c --- /dev/null +++ b/consensus/src/consensusdb/writer.rs @@ -0,0 +1,75 @@ +use rocksdb::WriteBatch; +use starcoin_storage::storage::InnerStore; + +use super::schema::{KeyCodec, Schema, ValueCodec}; +use super::{db::DBStorage, error::StoreError}; + +/// Abstraction over direct/batched DB writing +pub trait DbWriter { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError>; + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError>; +} + +pub struct DirectDbWriter<'a> { + db: &'a DBStorage, +} + +impl<'a> DirectDbWriter<'a> { + pub fn new(db: &'a DBStorage) -> Self { + Self { db } + } +} + +impl DbWriter for DirectDbWriter<'_> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let bin_key = key.encode_key()?; + let bin_data = value.encode_value()?; + self.db + .put(S::COLUMN_FAMILY, bin_key, bin_data) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } + + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; + self.db + .remove(S::COLUMN_FAMILY, key) + .map_err(|e| StoreError::DBIoError(e.to_string())) + } +} + +pub struct BatchDbWriter<'a> { + batch: &'a mut WriteBatch, +} + +impl<'a> BatchDbWriter<'a> { + pub fn new(batch: &'a mut WriteBatch) -> Self { + Self { batch } + } +} + +impl DbWriter for BatchDbWriter<'_> { + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + let key = key.encode_key()?; + let value = value.encode_value()?; + self.batch.put(key, value); + Ok(()) + } + + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + let key = key.encode_key()?; + self.batch.delete(key); + Ok(()) + } +} + +impl DbWriter for &mut T { + #[inline] + fn put(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { + (*self).put::(key, value) + } + + #[inline] + fn delete(&mut self, key: &S::Key) -> Result<(), StoreError> { + (*self).delete::(key) + } +} diff --git a/consensus/src/dag/blockdag.rs b/consensus/src/dag/blockdag.rs new file mode 100644 index 0000000000..a50b4459fb --- /dev/null +++ b/consensus/src/dag/blockdag.rs @@ -0,0 +1,374 @@ +use super::ghostdag::protocol::{ColoringOutput, GhostdagManager}; +use super::reachability::{inquirer, reachability_service::MTReachabilityService}; +use super::types::ghostdata::GhostdagData; +use crate::consensusdb::prelude::StoreError; +use crate::consensusdb::schemadb::GhostdagStoreReader; +use crate::consensusdb::{ + prelude::FlexiDagStorage, + schemadb::{ + DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, + HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, + }, +}; +use crate::FlexiDagStorageConfig; +use anyhow::{anyhow, bail, Ok}; +use bcs_ext::BCSCodec; +use parking_lot::RwLock; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; +use starcoin_accumulator::node::AccumulatorStoreType; +use starcoin_accumulator::{Accumulator, MerkleAccumulator}; +use starcoin_config::NodeConfig; +use starcoin_crypto::HashValue as Hash; +use starcoin_storage::flexi_dag::{KTotalDifficulty, 
SyncFlexiDagSnapshot, SyncFlexiDagSnapshotHasher}; +use starcoin_storage::storage::CodecKVStore; +use starcoin_storage::{BlockStore, Storage, Store, SyncFlexiDagStore}; +use starcoin_types::block::BlockNumber; +use starcoin_types::startup_info; +use starcoin_types::{ + blockhash::{BlockHashes, KType, ORIGIN}, + header::{ConsensusHeader, DagHeader}, +}; +use std::collections::HashSet; +use std::collections::{BinaryHeap, HashMap}; +use std::path::Path; +use std::sync::{Arc, Mutex}; + +pub type DbGhostdagManager = GhostdagManager< + DbGhostdagStore, + DbRelationsStore, + MTReachabilityService<DbReachabilityStore>, + DbHeadersStore, +>; + +#[derive(Clone)] +pub enum InitDagState { + FailedToInitDag, + InitDagSuccess(Arc<Mutex<BlockDAG>>), + InitedDag, + NoNeedInitDag, +} + +#[derive(Clone)] +pub struct BlockDAG { + genesis_hash: Hash, + ghostdag_manager: DbGhostdagManager, + relations_store: DbRelationsStore, + reachability_store: DbReachabilityStore, + ghostdag_store: DbGhostdagStore, + header_store: DbHeadersStore, + /// orphan blocks: parent hash -> orphan blocks waiting on it + missing_blocks: HashMap<Hash, HashSet<DagHeader>>, +} + +impl BlockDAG { + pub fn new(genesis_hash: Hash, k: KType, db: FlexiDagStorage) -> Self { + let ghostdag_store = db.ghost_dag_store.clone(); + let header_store = db.header_store.clone(); + let relations_store = db.relations_store.clone(); + let mut reachability_store = db.reachability_store; + inquirer::init(&mut reachability_store).unwrap(); + let reachability_service = + MTReachabilityService::new(Arc::new(RwLock::new(reachability_store.clone()))); + let ghostdag_manager = DbGhostdagManager::new( + genesis_hash, + k, + ghostdag_store.clone(), + relations_store.clone(), + header_store.clone(), + reachability_service, + ); + + Self { + genesis_hash, + ghostdag_manager, + relations_store, + reachability_store, + ghostdag_store, + header_store, + missing_blocks: HashMap::new(), + } + } + + pub fn calculate_dag_accumulator_key(snapshot: &SyncFlexiDagSnapshotHasher) -> anyhow::Result<Hash> { + Ok(Hash::sha3_256_of(&snapshot.encode().expect( + "encoding the sorted relationship set must be successful", + ))) + } + + pub fn try_init_with_storage( + storage: Arc<Storage>, + config: Arc<NodeConfig>, + ) -> anyhow::Result<(Option<Self>, Option<MerkleAccumulator>)> { + let startup_info = storage + .get_startup_info()? + .expect("startup info must exist"); + if startup_info.get_dag_main().is_some() { + let accumulator_info = storage + .get_dag_accumulator_info()? + .expect("dag accumulator info should exist"); + assert!( + accumulator_info.get_num_leaves() > 0, + "the number of dag accumulator leaves must be greater than 0" + ); + let dag_accumulator = MerkleAccumulator::new_with_info( + accumulator_info, + storage.get_accumulator_store(AccumulatorStoreType::SyncDag), + ); + let dag_genesis_hash = dag_accumulator + .get_leaf(0)? + .expect("the genesis in the dag accumulator must not be none"); + + let dag_genesis_header = storage + .get_block_header_by_hash(dag_genesis_hash)? + .expect("the genesis block in the dag accumulator must not be none"); + + Ok(( + Some(Self::new_by_config( + DagHeader::new_genesis(dag_genesis_header), + config.data_dir().join("flexidag").as_path(), + )?), + Some(dag_accumulator), + )) + } else { + let block_header = storage + .get_block_header_by_hash(startup_info.get_main().clone())? + .expect("the main block header must not be none"); + let fork_height = storage.dag_fork_height(config.net().id().clone()); + if block_header.number() < fork_height { + Ok((None, None)) + } else if block_header.number() == fork_height { + let dag_accumulator = MerkleAccumulator::new_with_info( + AccumulatorInfo::default(), + storage.get_accumulator_store(AccumulatorStoreType::SyncDag), + ); + + let mut k_total_difficulties = BinaryHeap::new(); + k_total_difficulties.push(KTotalDifficulty { + head_block_id: block_header.id(), + total_difficulty: storage + .get_block_info(block_header.id())? + .expect("block info must exist") + .get_total_difficulty(), + }); + let snapshot_hasher = SyncFlexiDagSnapshotHasher { + child_hashes: vec![block_header.id()], + head_block_id: block_header.id(), + k_total_difficulties, + }; + let key = Self::calculate_dag_accumulator_key(&snapshot_hasher)?; + dag_accumulator.append(&[key])?; + storage.get_accumulator_snapshot_storage().put( + key, + snapshot_hasher.to_snapshot(dag_accumulator.get_info()), + )?; + dag_accumulator.flush()?; + Ok(( + Some(Self::new_by_config( + DagHeader::new_genesis(block_header), + config.data_dir().join("flexidag").as_path(), + )?), + Some(dag_accumulator), + )) + } else { + bail!("failed to init dag") + } + } + } + + pub fn new_by_config(header: DagHeader, db_path: &Path) -> anyhow::Result<Self> { + let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + let db = FlexiDagStorage::create_from_path(db_path, config)?; + let dag = Self::new(header.hash(), 16, db); + Ok(dag) + } + + pub fn clear_missing_block(&mut self) { + self.missing_blocks.clear(); + } + + pub fn init_with_genesis(&mut self, genesis: DagHeader) -> anyhow::Result<()> { + if self.relations_store.has(Hash::new(ORIGIN))?
{ + return Err(anyhow!("Already init with genesis")); + }; + self.relations_store + .insert(Hash::new(ORIGIN), BlockHashes::new(vec![]))?; + self.add_to_dag(genesis)?; + Ok(()) + } + + pub fn add_to_dag(&mut self, header: DagHeader) -> anyhow::Result<GhostdagData> { + //TODO: check genesis + // Generate ghostdag data + let parents_hash = header.parents_hash(); + let ghostdag_data = if header.hash() != self.genesis_hash { + self.ghostdag_manager.ghostdag(parents_hash) + } else { + self.ghostdag_manager.genesis_ghostdag_data() + }; + // Store the ghostdag data + self.ghostdag_store + .insert(header.hash(), Arc::new(ghostdag_data.clone()))?; + + // Update the reachability store + let mut reachability_store = self.reachability_store.clone(); + let mut merge_set = ghostdag_data + .unordered_mergeset_without_selected_parent() + .filter(|hash| self.reachability_store.has(*hash).unwrap()); + + inquirer::add_block( + &mut reachability_store, + header.hash(), + ghostdag_data.selected_parent, + &mut merge_set, + )?; + + // Store relations + self.relations_store + .insert(header.hash(), BlockHashes::new(parents_hash.to_vec()))?; + // Store the header + self.header_store + .insert(header.hash(), Arc::new(header.to_owned()), 0)?; + Ok(ghostdag_data) + } + + fn is_in_dag(&self, _hash: Hash) -> anyhow::Result<bool> { + Ok(true) + } + pub fn verify_header(&self, _header: &DagHeader) -> anyhow::Result<()> { + //TODO: implement it + Ok(()) + } + + pub fn connect_block(&mut self, header: DagHeader) -> anyhow::Result<()> { + self.verify_header(&header)?; + let is_orphan_block = self.update_orphans(&header)?; + if is_orphan_block { + return Ok(()); + } + self.add_to_dag(header.clone())?; + self.check_missing_block(header)?; + Ok(()) + } + + pub fn check_missing_block(&mut self, header: DagHeader) -> anyhow::Result<()> { + if let Some(orphans) = self.missing_blocks.remove(&header.hash()) { + for orphan in orphans.iter() { + if !self.is_orphan(orphan)? { + // all of the orphan's parents have now arrived, so it can be added + self.add_to_dag(orphan.clone())?; + } + } + } + Ok(()) + } + fn is_orphan(&self, header: &DagHeader) -> anyhow::Result<bool> { + for parent in header.parents_hash() { + if !self.is_in_dag(parent.to_owned())? { + return Ok(true); + } + } + Ok(false) + } + pub fn get_ghostdag_data(&self, hash: Hash) -> anyhow::Result<Arc<GhostdagData>> { + let ghostdata = self.ghostdag_store.get_data(hash)?; + Ok(ghostdata) + } + + fn update_orphans(&mut self, block_header: &DagHeader) -> anyhow::Result<bool> { + let mut is_orphan = false; + for parent in block_header.parents_hash() { + if self.is_in_dag(parent.to_owned())? { + continue; + } + if !self + .missing_blocks + .entry(parent.to_owned()) + .or_insert_with(HashSet::new) + .insert(block_header.to_owned()) + { + return Err(anyhow::anyhow!("Block already processed as an orphan")); + } + is_orphan = true; + } + Ok(is_orphan) + } + + pub fn get_block_header(&self, hash: Hash) -> anyhow::Result<DagHeader> { + self.header_store + .get_header(hash) + .map_err(|e| anyhow!("failed to get header by hash: {}", e)) + } + + pub fn get_parents(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { + self.relations_store + .get_parents(hash) + .map(|parents| (*parents).clone()) + .map_err(|e| anyhow!("failed to get parents by hash: {}", e)) + } + + pub fn get_children(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { + self.relations_store + .get_children(hash) + .map(|children| (*children).clone()) + .map_err(|e| anyhow!("failed to get children by hash: {}", e)) + } + + // for testing + pub fn push_parent_children( + &mut self, + child: Hash, + parents: Arc<Vec<Hash>>, + ) -> Result<(), StoreError> { + self.relations_store.insert(child, parents) + } + + pub fn get_genesis_hash(&self) -> Hash { + self.genesis_hash + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; + use starcoin_types::block::BlockHeader; + use std::{env, fs}; + + #[test] + fn base_test() { + let genesis = DagHeader::new_genesis(BlockHeader::random()); + let genesis_hash = genesis.hash(); + let k = 16; + let db_path = env::temp_dir().join("smolstc"); + println!("db path:{}", db_path.to_string_lossy()); + if db_path + .as_path() + .try_exists() + .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) + { + fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); + } + let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + let db = FlexiDagStorage::create_from_path(db_path, config) + .expect("Failed to create flexidag storage"); + let mut dag = BlockDAG::new(genesis_hash, k, db); + dag.init_with_genesis(genesis) + .expect("failed to init with genesis"); + let block = DagHeader::new(BlockHeader::random()); + dag.add_to_dag(block) + .expect("failed to add a block to the dag"); + } +} diff --git a/consensus/src/dag/ghostdag/mergeset.rs b/consensus/src/dag/ghostdag/mergeset.rs new file mode 100644 index 0000000000..79aefe2db7 --- /dev/null +++ b/consensus/src/dag/ghostdag/mergeset.rs @@ -0,0 +1,71 @@ +use super::protocol::GhostdagManager; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::dag::reachability::reachability_service::ReachabilityService; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::BlockHashSet; +use std::collections::VecDeque; + +impl< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager<T, S, U, V> +{ + pub fn ordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) -> Vec<Hash> { + self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) + } + + pub fn unordered_mergeset_without_selected_parent( + &self, + selected_parent: Hash, + parents: &[Hash], + ) ->
BlockHashSet { + let mut queue: VecDeque<_> = parents + .iter() + .copied() + .filter(|p| p != &selected_parent) + .collect(); + let mut mergeset: BlockHashSet = queue.iter().copied().collect(); + let mut selected_parent_past = BlockHashSet::new(); + + while let Some(current) = queue.pop_front() { + let current_parents = self + .relations_store + .get_parents(current) + .unwrap_or_else(|err| panic!("failed to get the parents of {current}: {err:?}")); + + // For each parent of the current block we check whether it is in the past of the selected parent. If not, + // we add it to the resulting merge-set and queue it for further processing. + for parent in current_parents.iter() { + if mergeset.contains(parent) { + continue; + } + + if selected_parent_past.contains(parent) { + continue; + } + + if self + .reachability_service + .is_dag_ancestor_of(*parent, selected_parent) + { + selected_parent_past.insert(*parent); + continue; + } + + mergeset.insert(*parent); + queue.push_back(*parent); + } + } + + mergeset + } +} diff --git a/consensus/src/dag/ghostdag/mod.rs b/consensus/src/dag/ghostdag/mod.rs new file mode 100644 index 0000000000..51a2c8fc82 --- /dev/null +++ b/consensus/src/dag/ghostdag/mod.rs @@ -0,0 +1,4 @@ +pub mod mergeset; +pub mod protocol; + +mod util; diff --git a/consensus/src/dag/ghostdag/protocol.rs b/consensus/src/dag/ghostdag/protocol.rs new file mode 100644 index 0000000000..9afc86d3bd --- /dev/null +++ b/consensus/src/dag/ghostdag/protocol.rs @@ -0,0 +1,338 @@ +use super::util::Refs; +use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; +use crate::dag::reachability::reachability_service::ReachabilityService; +use crate::dag::types::{ghostdata::GhostdagData, ordering::*}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash::{ + self, BlockHashExtensions, BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType, +}; +use std::sync::Arc; +// For GhostdagStoreReader-related functions, use GhostDagDataWrapper instead.
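// ---- editor's note (not part of the patch) ----
// Worked example for `unordered_mergeset_without_selected_parent` defined in
// mergeset.rs above, on the small DAG used by the relations-store test
// (blocks 2 and 3 extend 1; block 4 merges 2 and 3):
//
// For a new block with parents {2, 3} and selected parent 3, the queue starts
// as {2} and the mergeset as {2}. Processing 2 visits its parent 1; since 1 is
// a DAG ancestor of the selected parent 3, it lands in `selected_parent_past`
// and is pruned. The BFS terminates with mergeset = {2}: exactly the blocks
// reachable from the new block's parents but outside the selected parent's past.
// ------------------------------------------------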
+// ascending_mergeset_without_selected_parent +// descending_mergeset_without_selected_parent +// consensus_ordered_mergeset +// consensus_ordered_mergeset_without_selected_parent +//use dag_database::consensus::GhostDagDataWrapper; + +#[derive(Clone)] +pub struct GhostdagManager< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, +> { + genesis_hash: Hash, + pub(super) k: KType, + pub(super) ghostdag_store: T, + pub(super) relations_store: S, + pub(super) headers_store: V, + pub(super) reachability_service: U, +} + +impl< + T: GhostdagStoreReader, + S: RelationsStoreReader, + U: ReachabilityService, + V: HeaderStoreReader, + > GhostdagManager +{ + pub fn new( + genesis_hash: Hash, + k: KType, + ghostdag_store: T, + relations_store: S, + headers_store: V, + reachability_service: U, + ) -> Self { + Self { + genesis_hash, + k, + ghostdag_store, + relations_store, + reachability_service, + headers_store, + } + } + + pub fn genesis_ghostdag_data(&self) -> GhostdagData { + GhostdagData::new( + 0, + Default::default(), // TODO: take blue score and work from actual genesis + Hash::new(blockhash::ORIGIN), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + ) + } + + pub fn origin_ghostdag_data(&self) -> Arc { + Arc::new(GhostdagData::new( + 0, + Default::default(), + 0.into(), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + )) + } + + pub fn find_selected_parent(&self, parents: impl IntoIterator) -> Hash { + parents + .into_iter() + .map(|parent| SortableBlock { + hash: parent, + blue_work: self.ghostdag_store.get_blue_work(parent).unwrap(), + }) + .max() + .unwrap() + .hash + } + + /// Runs the GHOSTDAG protocol and calculates the block GhostdagData by the given parents. + /// The function calculates mergeset blues by iterating over the blocks in + /// the anticone of the new block selected parent (which is the parent with the + /// highest blue work) and adds any block to the blue set if by adding + /// it these conditions will not be violated: + /// + /// 1) |anticone-of-candidate-block ∩ blue-set-of-new-block| ≤ K + /// + /// 2) For every blue block in blue-set-of-new-block: + /// |(anticone-of-blue-block ∩ blue-set-new-block) ∪ {candidate-block}| ≤ K. + /// We validate this condition by maintaining a map blues_anticone_sizes for + /// each block which holds all the blue anticone sizes that were affected by + /// the new added blue blocks. + /// So to find out what is |anticone-of-blue ∩ blue-set-of-new-block| we just iterate in + /// the selected parent chain of the new block until we find an existing entry in + /// blues_anticone_sizes. 
+ /// + /// For further details see the article https://eprint.iacr.org/2018/104.pdf + pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData { + assert!( + !parents.is_empty(), + "genesis must be added via a call to init" + ); + + // Run the GHOSTDAG parent selection algorithm + let selected_parent = self.find_selected_parent(parents.iter().copied()); + // Initialize new GHOSTDAG block data with the selected parent + let mut new_block_data = GhostdagData::new_with_selected_parent(selected_parent, self.k); + // Get the mergeset in consensus-agreed topological order (topological here means forward in time from blocks to children) + let ordered_mergeset = + self.ordered_mergeset_without_selected_parent(selected_parent, parents); + + for blue_candidate in ordered_mergeset.iter().cloned() { + let coloring = self.check_blue_candidate(&new_block_data, blue_candidate); + + if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { + // No k-cluster violation found, we can now set the candidate block as blue + new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); + } else { + new_block_data.add_red(blue_candidate); + } + } + + let blue_score = self + .ghostdag_store + .get_blue_score(selected_parent) + .unwrap() + .checked_add(new_block_data.mergeset_blues.len() as u64) + .unwrap(); + + let added_blue_work: BlueWorkType = new_block_data + .mergeset_blues + .iter() + .cloned() + .map(|hash| { + if hash.is_origin() { + 0u128 + } else { + // TODO: implement calculating the PoW work; a constant stands in for now + let _difficulty = self.headers_store.get_difficulty(hash).unwrap(); + 1024u128 + } + }) + .sum(); + + let blue_work = self + .ghostdag_store + .get_blue_work(selected_parent) + .unwrap() + .checked_add(added_blue_work) + .unwrap(); + new_block_data.finalize_score_and_work(blue_score, blue_work); + + new_block_data + } + + fn check_blue_candidate_with_chain_block( + &self, + new_block_data: &GhostdagData, + chain_block: &ChainBlock, + blue_candidate: Hash, + candidate_blues_anticone_sizes: &mut BlockHashMap, + candidate_blue_anticone_size: &mut KType, + ) -> ColoringState { + // If blue_candidate is in the future of chain_block, it means + // that all remaining blues are in the past of chain_block and thus + // in the past of blue_candidate. In this case we know for sure that + // the anticone of blue_candidate will not exceed K, and we can mark + // it as blue. + // + // The new block is always in the future of blue_candidate, so there's + // no point in checking it. + + // We check if chain_block is not the new block by checking if it has a hash. + if let Some(hash) = chain_block.hash { + if self + .reachability_service + .is_dag_ancestor_of(hash, blue_candidate) + { + return ColoringState::Blue; + } + } + + for &block in chain_block.data.mergeset_blues.iter() { + // Skip blocks that exist in the past of blue_candidate.
+ if self + .reachability_service + .is_dag_ancestor_of(block, blue_candidate) + { + continue; + } + + candidate_blues_anticone_sizes + .insert(block, self.blue_anticone_size(block, new_block_data)); + + *candidate_blue_anticone_size = (*candidate_blue_anticone_size).checked_add(1).unwrap(); + if *candidate_blue_anticone_size > self.k { + // k-cluster violation: The candidate's blue anticone exceeded k + return ColoringState::Red; + } + + if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { + // k-cluster violation: A block in candidate's blue anticone already + // has k blue blocks in its own anticone + return ColoringState::Red; + } + + // This is a sanity check that validates that a blue + // block's blue anticone is not already larger than K. + assert!( + *candidate_blues_anticone_sizes.get(&block).unwrap() <= self.k, + "found blue anticone larger than K" + ); + } + + ColoringState::Pending + } + + /// Returns the blue anticone size of `block` from the worldview of `context`. + /// Expects `block` to be in the blue set of `context` + fn blue_anticone_size(&self, block: Hash, context: &GhostdagData) -> KType { + let mut current_blues_anticone_sizes = HashKTypeMap::clone(&context.blues_anticone_sizes); + let mut current_selected_parent = context.selected_parent; + loop { + if let Some(size) = current_blues_anticone_sizes.get(&block) { + return *size; + } + + if current_selected_parent == self.genesis_hash + || current_selected_parent == Hash::new(blockhash::ORIGIN) + { + panic!("block {block} is not in blue set of the given context"); + } + + current_blues_anticone_sizes = self + .ghostdag_store + .get_blues_anticone_sizes(current_selected_parent) + .unwrap(); + current_selected_parent = self + .ghostdag_store + .get_selected_parent(current_selected_parent) + .unwrap(); + } + } + + pub fn check_blue_candidate( + &self, + new_block_data: &GhostdagData, + blue_candidate: Hash, + ) -> ColoringOutput { + // The maximum length of new_block_data.mergeset_blues can be K+1 because + // it contains the selected parent. + if new_block_data.mergeset_blues.len() as KType == self.k.checked_add(1).unwrap() { + return ColoringOutput::Red; + } + + let mut candidate_blues_anticone_sizes: BlockHashMap = + BlockHashMap::with_capacity(self.k as usize); + // Iterate over all blocks in the blue past of the new block that are not in the past + // of blue_candidate, and check for each one of them if blue_candidate potentially + // enlarges their blue anticone to be over K, or that they enlarge the blue anticone + // of blue_candidate to be over K. 
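// ---- editor's note (not part of the patch) ----
// Numeric illustration of the two k-cluster checks implemented in
// `check_blue_candidate_with_chain_block` above, for k = 3:
//
// * the candidate is rejected (Red) once `candidate_blue_anticone_size`
//   would exceed 3, i.e. a 4th blue block is found in its anticone;
// * it is also rejected if some already-blue block has exactly 3 blues in
//   its anticone, since coloring the candidate blue would make it a 4th.
//
// Only if the chain walk reaches a block in the candidate's past with
// neither bound violated does the candidate come out Blue.
// ------------------------------------------------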
+ let mut chain_block = ChainBlock { + hash: None, + data: new_block_data.into(), + }; + let mut candidate_blue_anticone_size: KType = 0; + + loop { + let state = self.check_blue_candidate_with_chain_block( + new_block_data, + &chain_block, + blue_candidate, + &mut candidate_blues_anticone_sizes, + &mut candidate_blue_anticone_size, + ); + + match state { + ColoringState::Blue => { + return ColoringOutput::Blue( + candidate_blue_anticone_size, + candidate_blues_anticone_sizes, + ) + } + ColoringState::Red => return ColoringOutput::Red, + ColoringState::Pending => (), // continue looping + } + + chain_block = ChainBlock { + hash: Some(chain_block.data.selected_parent), + data: self + .ghostdag_store + .get_data(chain_block.data.selected_parent) + .unwrap() + .into(), + } + } + } + + pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { + let mut sorted_blocks: Vec = blocks.into_iter().collect(); + sorted_blocks.sort_by_cached_key(|block| SortableBlock { + hash: *block, + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap(), + }); + sorted_blocks + } +} + +/// Chain block with attached ghostdag data +struct ChainBlock<'a> { + hash: Option, // if set to `None`, signals being the new block + data: Refs<'a, GhostdagData>, +} + +/// Represents the intermediate GHOSTDAG coloring state for the current candidate +enum ColoringState { + Blue, + Red, + Pending, +} + +#[derive(Debug)] +/// Represents the final output of GHOSTDAG coloring for the current candidate +pub enum ColoringOutput { + Blue(KType, BlockHashMap), // (blue anticone size, map of blue anticone sizes for each affected blue) + Red, +} diff --git a/consensus/src/dag/ghostdag/util.rs b/consensus/src/dag/ghostdag/util.rs new file mode 100644 index 0000000000..68eb4b9b31 --- /dev/null +++ b/consensus/src/dag/ghostdag/util.rs @@ -0,0 +1,57 @@ +use std::{ops::Deref, rc::Rc, sync::Arc}; +/// Enum used to represent a concrete varying pointer type which only needs to be accessed by ref. 
+/// We avoid adding a `Val(T)` variant in order to keep the size of the enum minimal +pub enum Refs<'a, T> { + Ref(&'a T), + Arc(Arc), + Rc(Rc), + Box(Box), +} + +impl AsRef for Refs<'_, T> { + fn as_ref(&self) -> &T { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl Deref for Refs<'_, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + match self { + Refs::Ref(r) => r, + Refs::Arc(a) => a, + Refs::Rc(r) => r, + Refs::Box(b) => b, + } + } +} + +impl<'a, T> From<&'a T> for Refs<'a, T> { + fn from(r: &'a T) -> Self { + Self::Ref(r) + } +} + +impl From> for Refs<'_, T> { + fn from(a: Arc) -> Self { + Self::Arc(a) + } +} + +impl From> for Refs<'_, T> { + fn from(r: Rc) -> Self { + Self::Rc(r) + } +} + +impl From> for Refs<'_, T> { + fn from(b: Box) -> Self { + Self::Box(b) + } +} diff --git a/consensus/src/dag/mod.rs b/consensus/src/dag/mod.rs new file mode 100644 index 0000000000..9485bd456a --- /dev/null +++ b/consensus/src/dag/mod.rs @@ -0,0 +1,4 @@ +pub mod blockdag; +pub mod ghostdag; +mod reachability; +pub mod types; diff --git a/consensus/src/dag/reachability/extensions.rs b/consensus/src/dag/reachability/extensions.rs new file mode 100644 index 0000000000..9ea769fb9a --- /dev/null +++ b/consensus/src/dag/reachability/extensions.rs @@ -0,0 +1,50 @@ +use crate::consensusdb::{prelude::StoreResult, schemadb::ReachabilityStoreReader}; +use crate::dag::types::interval::Interval; +use starcoin_crypto::hash::HashValue as Hash; + +pub(super) trait ReachabilityStoreIntervalExtensions { + fn interval_children_capacity(&self, block: Hash) -> StoreResult; + fn interval_remaining_before(&self, block: Hash) -> StoreResult; + fn interval_remaining_after(&self, block: Hash) -> StoreResult; +} + +impl ReachabilityStoreIntervalExtensions for T { + /// Returns the reachability allocation capacity for children of `block` + fn interval_children_capacity(&self, block: Hash) -> StoreResult { + // The interval of a block should *strictly* contain the intervals of its + // tree children, hence we subtract 1 from the end of the range. 
+ Ok(self.get_interval(block)?.decrease_end(1)) + } + + /// Returns the available interval to allocate for tree children, taken from the + /// beginning of children allocation capacity + fn interval_remaining_before(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.first() { + Some(first_child) => { + let first_alloc = self.get_interval(*first_child)?; + Ok(Interval::new( + alloc_capacity.start, + first_alloc.start.checked_sub(1).unwrap(), + )) + } + None => Ok(alloc_capacity), + } + } + + /// Returns the available interval to allocate for tree children, taken from the + /// end of children allocation capacity + fn interval_remaining_after(&self, block: Hash) -> StoreResult { + let alloc_capacity = self.interval_children_capacity(block)?; + match self.get_children(block)?.last() { + Some(last_child) => { + let last_alloc = self.get_interval(*last_child)?; + Ok(Interval::new( + last_alloc.end.checked_add(1).unwrap(), + alloc_capacity.end, + )) + } + None => Ok(alloc_capacity), + } + } +} diff --git a/consensus/src/dag/reachability/inquirer.rs b/consensus/src/dag/reachability/inquirer.rs new file mode 100644 index 0000000000..022a71074b --- /dev/null +++ b/consensus/src/dag/reachability/inquirer.rs @@ -0,0 +1,345 @@ +use super::{tree::*, *}; +use crate::consensusdb::schemadb::{ReachabilityStore, ReachabilityStoreReader}; +use crate::dag::types::{interval::Interval, perf}; +use starcoin_crypto::HashValue as Hash; +use starcoin_types::blockhash; + +/// Init the reachability store to match the state required by the algorithmic layer. +/// The function first checks the store for possibly being initialized already. +pub fn init(store: &mut (impl ReachabilityStore + ?Sized)) -> Result<()> { + init_with_params(store, Hash::new(blockhash::ORIGIN), Interval::maximal()) +} + +pub(super) fn init_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + origin: Hash, + capacity: Interval, +) -> Result<()> { + if store.has(origin)? { + return Ok(()); + } + store.init(origin, capacity)?; + Ok(()) +} + +type HashIterator<'a> = &'a mut dyn Iterator; + +/// Add a block to the DAG reachability data structures and persist using the provided `store`. 
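// ---- editor's note (not part of the patch) ----
// The invariant behind `interval_children_capacity` and friends above: a
// block's interval strictly contains the interval of every tree descendant,
// so ancestry queries reduce to O(1) interval containment. A sketch using
// `Interval` as it is used elsewhere in this patch:
//
//     let parent = Interval::new(1, 15);
//     let child = Interval::new(2, 7); // allocated out of parent's capacity
//     assert!(parent.contains(child)); // parent is an ancestor of child
//     assert!(parent.strictly_contains(child)); // and a *proper* ancestor
// ------------------------------------------------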
+pub fn add_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + add_block_with_params( + store, + new_block, + selected_parent, + mergeset_iterator, + None, + None, + ) +} + +fn add_block_with_params( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + selected_parent: Hash, + mergeset_iterator: HashIterator, + reindex_depth: Option, + reindex_slack: Option, +) -> Result<()> { + add_tree_block( + store, + new_block, + selected_parent, + reindex_depth.unwrap_or(perf::DEFAULT_REINDEX_DEPTH), + reindex_slack.unwrap_or(perf::DEFAULT_REINDEX_SLACK), + )?; + add_dag_block(store, new_block, mergeset_iterator)?; + Ok(()) +} + +fn add_dag_block( + store: &mut (impl ReachabilityStore + ?Sized), + new_block: Hash, + mergeset_iterator: HashIterator, +) -> Result<()> { + // Update the future covering set for blocks in the mergeset + for merged_block in mergeset_iterator { + insert_to_future_covering_set(store, merged_block, new_block)?; + } + Ok(()) +} + +fn insert_to_future_covering_set( + store: &mut (impl ReachabilityStore + ?Sized), + merged_block: Hash, + new_block: Hash, +) -> Result<()> { + match binary_search_descendant( + store, + store.get_future_covering_set(merged_block)?.as_slice(), + new_block, + )? { + // We expect the query to not succeed, and to only return the correct insertion index. + // The existences of a `future covering item` (`FCI`) which is a chain ancestor of `new_block` + // contradicts `merged_block ∈ mergeset(new_block)`. Similarly, the existence of an FCI + // which `new_block` is a chain ancestor of, contradicts processing order. + SearchOutput::Found(_, _) => Err(ReachabilityError::DataInconsistency), + SearchOutput::NotFound(i) => { + store.insert_future_covering_item(merged_block, new_block, i)?; + Ok(()) + } + } +} + +/// Hint to the reachability algorithm that `hint` is a candidate to become +/// the `virtual selected parent` (`VSP`). This might affect internal reachability heuristics such +/// as moving the reindex point. The consensus runtime is expected to call this function +/// for a new header selected tip which is `header only` / `pending UTXO verification`, or for a completely resolved `VSP`. +pub fn hint_virtual_selected_parent( + store: &mut (impl ReachabilityStore + ?Sized), + hint: Hash, +) -> Result<()> { + try_advancing_reindex_root( + store, + hint, + perf::DEFAULT_REINDEX_DEPTH, + perf::DEFAULT_REINDEX_SLACK, + ) +} + +/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). +/// Note that this results in `false` if `this == queried` +pub fn is_strict_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .strictly_contains(store.get_interval(queried)?)) +} + +/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). +/// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. +pub fn is_chain_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + Ok(store + .get_interval(this)? + .contains(store.get_interval(queried)?)) +} + +/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). +/// Note: this method will return true if `this == queried`. 
+/// The complexity of this method is O(log(|future_covering_set(this)|)) +pub fn is_dag_ancestor_of( + store: &(impl ReachabilityStoreReader + ?Sized), + this: Hash, + queried: Hash, +) -> Result { + // First, check if `this` is a chain ancestor of queried + if is_chain_ancestor_of(store, this, queried)? { + return Ok(true); + } + // Otherwise, use previously registered future blocks to complete the + // DAG reachability test + match binary_search_descendant( + store, + store.get_future_covering_set(this)?.as_slice(), + queried, + )? { + SearchOutput::Found(_, _) => Ok(true), + SearchOutput::NotFound(_) => Ok(false), + } +} + +/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. +pub fn get_next_chain_ancestor( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + if descendant == ancestor { + // The next ancestor does not exist + return Err(ReachabilityError::BadQuery); + } + if !is_strict_chain_ancestor_of(store, ancestor, descendant)? { + // `ancestor` isn't actually a chain ancestor of `descendant`, so by def + // we cannot find the next ancestor as well + return Err(ReachabilityError::BadQuery); + } + + get_next_chain_ancestor_unchecked(store, descendant, ancestor) +} + +/// Note: it is important to keep the unchecked version for internal module use, +/// since in some scenarios during reindexing `descendant` might have a modified +/// interval which was not propagated yet. +pub(super) fn get_next_chain_ancestor_unchecked( + store: &(impl ReachabilityStoreReader + ?Sized), + descendant: Hash, + ancestor: Hash, +) -> Result { + match binary_search_descendant(store, store.get_children(ancestor)?.as_slice(), descendant)? { + SearchOutput::Found(hash, _) => Ok(hash), + SearchOutput::NotFound(_) => Err(ReachabilityError::BadQuery), + } +} + +enum SearchOutput { + NotFound(usize), // `usize` is the position to insert at + Found(Hash, usize), +} + +fn binary_search_descendant( + store: &(impl ReachabilityStoreReader + ?Sized), + ordered_hashes: &[Hash], + descendant: Hash, +) -> Result { + if cfg!(debug_assertions) { + // This is a linearly expensive assertion, keep it debug only + assert_hashes_ordered(store, ordered_hashes); + } + + // `Interval::end` represents the unique number allocated to this block + let point = store.get_interval(descendant)?.end; + + // We use an `unwrap` here since otherwise we need to implement `binary_search` + // ourselves, which is not worth the effort given that this would be an unrecoverable + // error anyhow + match ordered_hashes.binary_search_by_key(&point, |c| store.get_interval(*c).unwrap().start) { + Ok(i) => Ok(SearchOutput::Found(ordered_hashes[i], i)), + Err(i) => { + // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), + // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` + if i > 0 + && is_chain_ancestor_of( + store, + ordered_hashes[i.checked_sub(1).unwrap()], + descendant, + )? 
+ { + Ok(SearchOutput::Found( + ordered_hashes[i.checked_sub(1).unwrap()], + i.checked_sub(1).unwrap(), + )) + } else { + Ok(SearchOutput::NotFound(i)) + } + } + } +} + +fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordered_hashes: &[Hash]) { + let intervals: Vec = ordered_hashes + .iter() + .cloned() + .map(|c| store.get_interval(c).unwrap()) + .collect(); + debug_assert!(intervals + .as_slice() + .windows(2) + .all(|w| w[0].end < w[1].start)) +} + +#[cfg(test)] +mod tests { + use super::{super::tests::*, *}; + use crate::consensusdb::schemadb::MemoryReachabilityStore; + use starcoin_types::blockhash::ORIGIN; + + #[test] + fn test_add_tree_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + // Act + let root: Hash = 1.into(); + TreeBuilder::new(&mut store) + .init_with_params(root, Interval::new(1, 15)) + .add_block(2.into(), root) + .add_block(3.into(), 2.into()) + .add_block(4.into(), 2.into()) + .add_block(5.into(), 3.into()) + .add_block(6.into(), 5.into()) + .add_block(7.into(), 1.into()) + .add_block(8.into(), 6.into()) + .add_block(9.into(), 6.into()) + .add_block(10.into(), 6.into()) + .add_block(11.into(), 6.into()); + // Assert + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_early_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + + // Act + let root: Hash = Hash::from_u64(1); + let mut builder = TreeBuilder::new_with_params(&mut store, 2, 5); + builder.init_with_params(root, Interval::maximal()); + for i in 2u64..100 { + builder.add_block(Hash::from_u64(i), Hash::from_u64(i / 2)); + } + + // Should trigger an earlier than reindex root allocation + builder.add_block(Hash::from_u64(100), Hash::from_u64(2)); + store.validate_intervals(root).unwrap(); + } + + #[test] + fn test_add_dag_blocks() { + // Arrange + let mut store = MemoryReachabilityStore::new(); + let origin_hash = Hash::new(ORIGIN); + // Act + DagBuilder::new(&mut store) + .init() + .add_block(DagBlock::new(1.into(), vec![origin_hash])) + .add_block(DagBlock::new(2.into(), vec![1.into()])) + .add_block(DagBlock::new(3.into(), vec![1.into()])) + .add_block(DagBlock::new(4.into(), vec![2.into(), 3.into()])) + .add_block(DagBlock::new(5.into(), vec![4.into()])) + .add_block(DagBlock::new(6.into(), vec![1.into()])) + .add_block(DagBlock::new(7.into(), vec![5.into(), 6.into()])) + .add_block(DagBlock::new(8.into(), vec![1.into()])) + .add_block(DagBlock::new(9.into(), vec![1.into()])) + .add_block(DagBlock::new(10.into(), vec![7.into(), 8.into(), 9.into()])) + .add_block(DagBlock::new(11.into(), vec![1.into()])) + .add_block(DagBlock::new(12.into(), vec![11.into(), 10.into()])); + + // Assert intervals + store.validate_intervals(origin_hash).unwrap(); + + // Assert genesis + for i in 2u64..=12 { + assert!(store.in_past_of(1, i)); + } + + // Assert some futures + assert!(store.in_past_of(2, 4)); + assert!(store.in_past_of(2, 5)); + assert!(store.in_past_of(2, 7)); + assert!(store.in_past_of(5, 10)); + assert!(store.in_past_of(6, 10)); + assert!(store.in_past_of(10, 12)); + assert!(store.in_past_of(11, 12)); + + // Assert some anticones + assert!(store.are_anticone(2, 3)); + assert!(store.are_anticone(2, 6)); + assert!(store.are_anticone(3, 6)); + assert!(store.are_anticone(5, 6)); + assert!(store.are_anticone(3, 8)); + assert!(store.are_anticone(11, 2)); + assert!(store.are_anticone(11, 4)); + assert!(store.are_anticone(11, 6)); + assert!(store.are_anticone(11, 9)); + } +} diff --git a/consensus/src/dag/reachability/mod.rs 
b/consensus/src/dag/reachability/mod.rs new file mode 100644 index 0000000000..ceb2905b03 --- /dev/null +++ b/consensus/src/dag/reachability/mod.rs @@ -0,0 +1,50 @@ +mod extensions; +pub mod inquirer; +pub mod reachability_service; +mod reindex; +pub mod relations_service; + +#[cfg(test)] +mod tests; +mod tree; + +use crate::consensusdb::prelude::StoreError; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum ReachabilityError { + #[error("data store error")] + StoreError(#[from] StoreError), + + #[error("data overflow error")] + DataOverflow(String), + + #[error("data inconsistency error")] + DataInconsistency, + + #[error("query is inconsistent")] + BadQuery, +} + +impl ReachabilityError { + pub fn is_key_not_found(&self) -> bool { + matches!(self, ReachabilityError::StoreError(e) if matches!(e, StoreError::KeyNotFound(_))) + } +} + +pub type Result<T> = std::result::Result<T, ReachabilityError>; + +pub trait ReachabilityResultExtensions<T> { + /// Unwraps the error into `None` if the internal error is `StoreError::KeyNotFound`, or panics otherwise + fn unwrap_option(self) -> Option<T>; +} + +impl<T> ReachabilityResultExtensions<T> for Result<T> { + fn unwrap_option(self) -> Option<T> { + match self { + Ok(value) => Some(value), + Err(err) if err.is_key_not_found() => None, + Err(err) => panic!("Unexpected reachability error: {err:?}"), + } + } +} diff --git a/consensus/src/dag/reachability/reachability_service.rs b/consensus/src/dag/reachability/reachability_service.rs new file mode 100644 index 0000000000..6b2fa643a7 --- /dev/null +++ b/consensus/src/dag/reachability/reachability_service.rs @@ -0,0 +1,315 @@ +use super::{inquirer, Result}; +use crate::consensusdb::schemadb::ReachabilityStoreReader; +use parking_lot::RwLock; +use starcoin_crypto::{HashValue as Hash, HashValue}; +use starcoin_types::blockhash; +use std::{ops::Deref, sync::Arc}; + +pub trait ReachabilityService { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool>; + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator<Item = Hash>) -> bool; + fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool; + fn is_any_dag_ancestor_result( + &self, + list: &mut impl Iterator<Item = Hash>, + queried: Hash, + ) -> Result<bool>; + fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; +} + +/// Multi-threaded reachability service implementation +#[derive(Clone)] +pub struct MTReachabilityService<T: ReachabilityStoreReader + ?Sized> { + store: Arc<RwLock<T>>, +} + +impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> { + pub fn new(store: Arc<RwLock<T>>) -> Self { + Self { store } + } +} + +impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for MTReachabilityService<T> { + fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_chain_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool> { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried) + } + + fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool { + let read_guard = self.store.read(); + inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried).unwrap() + } + + fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool { + let read_guard = self.store.read(); + list.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried).unwrap()) + } + + fn is_any_dag_ancestor_result( + &self, + list: &mut impl
+        let read_guard = self.store.read();
+        for hash in list {
+            if inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried)? {
+                return Ok(true);
+            }
+        }
+        Ok(false)
+    }
+
+    fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator<Item = Hash>) -> bool {
+        let read_guard = self.store.read();
+        queried.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), this, hash).unwrap())
+    }
+
+    fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash {
+        let read_guard = self.store.read();
+        inquirer::get_next_chain_ancestor(read_guard.deref(), descendant, ancestor).unwrap()
+    }
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> {
+    /// Returns a forward iterator walking up the chain-selection tree from `from_ancestor`
+    /// to `to_descendant`, where `to_descendant` is included if `inclusive` is set to true.
+    ///
+    /// To skip `from_ancestor` simply apply `skip(1)`.
+    ///
+    /// The caller is expected to verify that `from_ancestor` is indeed a chain ancestor of
+    /// `to_descendant`, otherwise the function will panic.
+    pub fn forward_chain_iterator(
+        &self,
+        from_ancestor: Hash,
+        to_descendant: Hash,
+        inclusive: bool,
+    ) -> impl Iterator<Item = Hash> {
+        ForwardChainIterator::new(self.store.clone(), from_ancestor, to_descendant, inclusive)
+    }
+
+    /// Returns a backward iterator walking down the selected chain from `from_descendant`
+    /// to `to_ancestor`, where `to_ancestor` is included if `inclusive` is set to true.
+    ///
+    /// To skip `from_descendant` simply apply `skip(1)`.
+    ///
+    /// The caller is expected to verify that `to_ancestor` is indeed a chain ancestor of
+    /// `from_descendant`, otherwise the function will panic.
+    pub fn backward_chain_iterator(
+        &self,
+        from_descendant: Hash,
+        to_ancestor: Hash,
+        inclusive: bool,
+    ) -> impl Iterator<Item = Hash> {
+        BackwardChainIterator::new(self.store.clone(), from_descendant, to_ancestor, inclusive)
+    }
+
+    /// Returns the default chain iterator, walking from `from` backward down the
+    /// selected chain until `virtual genesis` (aka `blockhash::ORIGIN`; exclusive)
+    pub fn default_backward_chain_iterator(&self, from: Hash) -> impl Iterator<Item = Hash> {
+        BackwardChainIterator::new(
+            self.store.clone(),
+            from,
+            HashValue::new(blockhash::ORIGIN),
+            false,
+        )
+    }
+}
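A hypothetical caller of the iterator API above (not part of the diff): `store` and `tip` are assumed to come from elsewhere, with `store` being any populated type implementing `ReachabilityStoreReader` (such as the in-memory store used by the tests in this diff).

```rust
use parking_lot::RwLock;
use std::sync::Arc;

fn print_selected_chain<T: ReachabilityStoreReader>(store: T, tip: Hash) {
    let service = MTReachabilityService::new(Arc::new(RwLock::new(store)));
    // Walk from `tip` down the selected chain to the virtual genesis (exclusive).
    for block in service.default_backward_chain_iterator(tip) {
        println!("chain block: {block:?}");
    }
}
```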
+/// Iterator design: we currently read-lock at each movement of the iterator.
+/// Other options are to keep the read guard throughout the iterator lifetime, or
+/// a compromise where the lock is released every constant number of items.
+struct BackwardChainIterator<T: ReachabilityStoreReader + ?Sized> {
+    store: Arc<RwLock<T>>,
+    current: Option<Hash>,
+    ancestor: Hash,
+    inclusive: bool,
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> BackwardChainIterator<T> {
+    fn new(
+        store: Arc<RwLock<T>>,
+        from_descendant: Hash,
+        to_ancestor: Hash,
+        inclusive: bool,
+    ) -> Self {
+        Self {
+            store,
+            current: Some(from_descendant),
+            ancestor: to_ancestor,
+            inclusive,
+        }
+    }
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> Iterator for BackwardChainIterator<T> {
+    type Item = Hash;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(current) = self.current {
+            if current == self.ancestor {
+                if self.inclusive {
+                    self.current = None;
+                    Some(current)
+                } else {
+                    self.current = None;
+                    None
+                }
+            } else {
+                debug_assert_ne!(current, HashValue::new(blockhash::NONE));
+                let next = self.store.read().get_parent(current).unwrap();
+                self.current = Some(next);
+                Some(current)
+            }
+        } else {
+            None
+        }
+    }
+}
+
+struct ForwardChainIterator<T: ReachabilityStoreReader + ?Sized> {
+    store: Arc<RwLock<T>>,
+    current: Option<Hash>,
+    descendant: Hash,
+    inclusive: bool,
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> ForwardChainIterator<T> {
+    fn new(
+        store: Arc<RwLock<T>>,
+        from_ancestor: Hash,
+        to_descendant: Hash,
+        inclusive: bool,
+    ) -> Self {
+        Self {
+            store,
+            current: Some(from_ancestor),
+            descendant: to_descendant,
+            inclusive,
+        }
+    }
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> Iterator for ForwardChainIterator<T> {
+    type Item = Hash;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(current) = self.current {
+            if current == self.descendant {
+                if self.inclusive {
+                    self.current = None;
+                    Some(current)
+                } else {
+                    self.current = None;
+                    None
+                }
+            } else {
+                let next = inquirer::get_next_chain_ancestor(
+                    self.store.read().deref(),
+                    self.descendant,
+                    current,
+                )
+                .unwrap();
+                self.current = Some(next);
+                Some(current)
+            }
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::consensusdb::schemadb::MemoryReachabilityStore;
+    use crate::dag::{reachability::tests::TreeBuilder, types::interval::Interval};
+
+    #[test]
+    fn test_forward_iterator() {
+        // Arrange
+        let mut store = MemoryReachabilityStore::new();
+
+        // Act
+        let root: Hash = 1.into();
+        TreeBuilder::new(&mut store)
+            .init_with_params(root, Interval::new(1, 15))
+            .add_block(2.into(), root)
+            .add_block(3.into(), 2.into())
+            .add_block(4.into(), 2.into())
+            .add_block(5.into(), 3.into())
+            .add_block(6.into(), 5.into())
+            .add_block(7.into(), 1.into())
+            .add_block(8.into(), 6.into())
+            .add_block(9.into(), 6.into())
+            .add_block(10.into(), 6.into())
+            .add_block(11.into(), 6.into());
+
+        let service = MTReachabilityService::new(Arc::new(RwLock::new(store)));
+
+        // Exclusive
+        let iter = service.forward_chain_iterator(2.into(), 10.into(), false);
+
+        // Assert
+        let expected_hashes = [2u64, 3, 5, 6].map(Hash::from);
+        assert!(expected_hashes.iter().cloned().eq(iter));
+
+        // Inclusive
+        let iter = service.forward_chain_iterator(2.into(), 10.into(), true);
+
+        // Assert
+        let expected_hashes = [2u64, 3, 5, 6, 10].map(Hash::from);
+        assert!(expected_hashes.iter().cloned().eq(iter));
+
+        // Compare backward to reversed forward
+        let forward_iter = service.forward_chain_iterator(2.into(), 10.into(), true);
+        let backward_iter: Vec<Hash> = service
+            .backward_chain_iterator(10.into(), 2.into(), true)
+            .collect();
+        assert!(forward_iter.eq(backward_iter.iter().cloned().rev()))
+    }
+
+    #[test]
+    fn test_iterator_boundaries() {
+        // Arrange & Act
+        let mut store = MemoryReachabilityStore::new();
+        let root: Hash = 1.into();
+        TreeBuilder::new(&mut store)
+            .init_with_params(root, Interval::new(1, 5))
+            .add_block(2.into(), root);
+
+        let service = MTReachabilityService::new(Arc::new(RwLock::new(store)));
+
+        // Asserts
+        assert!([1u64, 2]
+            .map(Hash::from)
+            .iter()
+            .cloned()
+            .eq(service.forward_chain_iterator(1.into(), 2.into(), true)));
+        assert!([1u64]
+            .map(Hash::from)
+            .iter()
+            .cloned()
+            .eq(service.forward_chain_iterator(1.into(), 2.into(), false)));
+        assert!([2u64, 1]
+            .map(Hash::from)
+            .iter()
+            .cloned()
+            .eq(service.backward_chain_iterator(2.into(), root, true)));
+        assert!([2u64]
+            .map(Hash::from)
+            .iter()
+            .cloned()
+            .eq(service.backward_chain_iterator(2.into(), root, false)));
+        assert!(std::iter::once(root).eq(service.backward_chain_iterator(root, root, true)));
+        assert!(std::iter::empty::<Hash>().eq(service.backward_chain_iterator(root, root, false)));
+        assert!(std::iter::once(root).eq(service.forward_chain_iterator(root, root, true)));
+        assert!(std::iter::empty::<Hash>().eq(service.forward_chain_iterator(root, root, false)));
+    }
+}
diff --git a/consensus/src/dag/reachability/reindex.rs b/consensus/src/dag/reachability/reindex.rs
new file mode 100644
index 0000000000..48895b602a
--- /dev/null
+++ b/consensus/src/dag/reachability/reindex.rs
@@ -0,0 +1,684 @@
+use super::{
+    extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *,
+};
+use crate::consensusdb::schemadb::ReachabilityStore;
+use crate::dag::types::interval::Interval;
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap};
+use std::collections::VecDeque;
+
+/// A struct used during reindex operations. It represents a temporary context
+/// for caching subtree information during the *current* reindex operation only
+pub(super) struct ReindexOperationContext<'a, T: ReachabilityStore + ?Sized> {
+    store: &'a mut T,
+    subtree_sizes: BlockHashMap<u64>, // Cache for subtree sizes computed during this operation
+    _depth: u64,
+    slack: u64,
+}
+
+impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> {
+    pub(super) fn new(store: &'a mut T, depth: u64, slack: u64) -> Self {
+        Self {
+            store,
+            subtree_sizes: BlockHashMap::new(),
+            _depth: depth,
+            slack,
+        }
+    }
+
+    /// Traverses the reachability subtree that's defined by the new child
+    /// block and reallocates reachability interval space
+    /// such that another reindexing is unlikely to occur shortly
+    /// thereafter. It does this by traversing down the reachability
+    /// tree until it finds a block with an interval size that's greater than
+    /// its subtree size. See `propagate_interval` for further details.
+    pub(super) fn reindex_intervals(&mut self, new_child: Hash, reindex_root: Hash) -> Result<()> {
+        let mut current = new_child;
+
+        // Search for the first ancestor with sufficient interval space
+        loop {
+            let current_interval = self.store.get_interval(current)?;
+            self.count_subtrees(current)?;
+
+            // `current` has sufficient space, break and propagate
+            if current_interval.size() >= self.subtree_sizes[&current] {
+                break;
+            }
+
+            let parent = self.store.get_parent(current)?;
+
+            if parent.is_none() {
+                // If we ended up here it means that there are more
+                // than 2^64 blocks, which shouldn't ever happen.
+                return Err(ReachabilityError::DataOverflow(
+                    "missing tree
+                    parent during reindexing. Theoretically, this
+                    should only ever happen if there are more
+                    than 2^64 blocks in the DAG."
+                        .to_string(),
+                ));
+            }
+
+            if current == reindex_root {
+                // Reindex root is expected to hold enough capacity as long as there are fewer
+                // than ~2^52 blocks in the DAG, which should never happen in our lifetimes
+                // even if block rate per second is above 100.
+                // The calculation follows from the allocation of
+                // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root.
+                return Err(ReachabilityError::DataOverflow(format!(
+                    "unexpected behavior: reindex root {reindex_root} is out of capacity during reindexing.
+                    Theoretically, this should only ever happen if there are more than ~2^52 blocks in the DAG."
+                )));
+            }
+
+            if inquirer::is_strict_chain_ancestor_of(self.store, parent, reindex_root)? {
+                // In this case parent is guaranteed to have sufficient interval space,
+                // however we avoid reindexing the entire subtree above parent
+                // (which includes root and thus majority of blocks mined since)
+                // and use slacks along the chain up forward from parent to reindex root.
+                // Notes:
+                // 1. we set `required_allocation` = subtree size of current in order to double the
+                //    current interval capacity
+                // 2. it might be the case that current is the `new_child` itself
+                return self.reindex_intervals_earlier_than_root(
+                    current,
+                    reindex_root,
+                    parent,
+                    self.subtree_sizes[&current],
+                );
+            }
+
+            current = parent
+        }
+
+        self.propagate_interval(current)
+    }
+
+    ///
+    /// Core (BFS) algorithms used during reindexing (see `count_subtrees` and `propagate_interval` below)
+    ///
+    ///
+    /// count_subtrees counts the size of each subtree under this block,
+    /// and populates self.subtree_sizes with the results.
+    /// It is equivalent to the following recursive implementation:
+    ///
+    /// fn count_subtrees(&mut self, block: Hash) -> Result<u64> {
+    ///     let mut subtree_size = 0u64;
+    ///     for child in self.store.get_children(block)?.iter().cloned() {
+    ///         subtree_size += self.count_subtrees(child)?;
+    ///     }
+    ///     self.subtree_sizes.insert(block, subtree_size + 1);
+    ///     Ok(subtree_size + 1)
+    /// }
+    ///
+    /// However, we are expecting (linearly) deep trees, and so a
+    /// recursive stack-based approach is inefficient and will hit
+    /// recursion limits. Instead, the same logic was implemented
+    /// using a (queue-based) BFS method. At a high level, the
+    /// algorithm uses BFS for reaching all leaves and pushes
+    /// intermediate updates from leaves via parent chains until all
+    /// size information is gathered at the root of the operation
+    /// (i.e. at block).
+    fn count_subtrees(&mut self, block: Hash) -> Result<()> {
+        if self.subtree_sizes.contains_key(&block) {
+            return Ok(());
+        }
+
+        let mut queue = VecDeque::<Hash>::from([block]);
+        let mut counts = BlockHashMap::<u64>::new();
+
+        while let Some(mut current) = queue.pop_front() {
+            let children = self.store.get_children(current)?;
+            if children.is_empty() {
+                // We reached a leaf
+                self.subtree_sizes.insert(current, 1);
+            } else if !self.subtree_sizes.contains_key(&current) {
+                // We haven't yet calculated the subtree size of
+                // the current block. Add all its children to the
+                // queue
+                queue.extend(children.iter());
+                continue;
+            }
+
+            // We reached a leaf or a pre-calculated subtree.
+            // Push information up
+            while current != block {
+                current = self.store.get_parent(current)?;
+
+                let count = counts.entry(current).or_insert(0);
+                let children = self.store.get_children(current)?;
+
+                *count = (*count).checked_add(1).unwrap();
+                if *count < children.len() as u64 {
+                    // Not all subtrees of the current block are ready
+                    break;
+                }
+
+                // All children of `current` have calculated their subtree size.
+                // Sum them all together and add 1 to get the subtree size of
+                // `current`.
+                let subtree_sum: u64 = children.iter().map(|c| self.subtree_sizes[c]).sum();
+                self.subtree_sizes
+                    .insert(current, subtree_sum.checked_add(1).unwrap());
+            }
+        }
+
+        Ok(())
+    }
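The recursive definition in the doc comment above (|subtree(b)| = 1 + sum over children) can be illustrated on a plain map-based tree. A toy sketch, not from the diff, using an explicit stack instead of the production code's BFS-with-push-up accounting:

```rust
use std::collections::HashMap;

// Counts the subtree rooted at `root` over a simple child-list map.
fn count_subtree(children: &HashMap<u64, Vec<u64>>, root: u64) -> u64 {
    let mut stack = vec![root];
    let mut size = 0u64;
    while let Some(b) = stack.pop() {
        size += 1; // count the block itself
        if let Some(cs) = children.get(&b) {
            stack.extend(cs); // then visit its children
        }
    }
    size
}

fn main() {
    // Same shape as the `test_count_subtrees` tree later in this diff:
    // 1 -> {2, 7}, 2 -> {3, 4}, 3 -> {5}, 5 -> {6}, 6 -> {8}
    let children: HashMap<u64, Vec<u64>> = HashMap::from([
        (1, vec![2, 7]),
        (2, vec![3, 4]),
        (3, vec![5]),
        (5, vec![6]),
        (6, vec![8]),
    ]);
    assert_eq!(count_subtree(&children, 1), 8);
    assert_eq!(count_subtree(&children, 2), 6);
}
```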
+    /// Propagates a new interval using a BFS traversal.
+    /// Subtree intervals are recursively allocated according to subtree sizes and
+    /// the allocation rule in `Interval::split_exponential`.
+    fn propagate_interval(&mut self, block: Hash) -> Result<()> {
+        // Make sure subtrees are counted before propagating
+        self.count_subtrees(block)?;
+
+        let mut queue = VecDeque::<Hash>::from([block]);
+        while let Some(current) = queue.pop_front() {
+            let children = self.store.get_children(current)?;
+            if !children.is_empty() {
+                let sizes: Vec<u64> = children.iter().map(|c| self.subtree_sizes[c]).collect();
+                let interval = self.store.interval_children_capacity(current)?;
+                let intervals = interval.split_exponential(&sizes);
+                for (c, ci) in children.iter().copied().zip(intervals) {
+                    self.store.set_interval(c, ci)?;
+                }
+                queue.extend(children.iter());
+            }
+        }
+        Ok(())
+    }
+
+    /// This method implements the reindex algorithm for the case where the
+    /// new child node is not in reindex root's subtree. The function is expected to allocate
+    /// `required_allocation` to be added to interval of `allocation_block`. `common_ancestor` is
+    /// expected to be a direct parent of `allocation_block` and an ancestor of current `reindex_root`.
+    fn reindex_intervals_earlier_than_root(
+        &mut self,
+        allocation_block: Hash,
+        reindex_root: Hash,
+        common_ancestor: Hash,
+        required_allocation: u64,
+    ) -> Result<()> {
+        // The chosen child is: (i) child of `common_ancestor`; (ii) an
+        // ancestor of `reindex_root` or `reindex_root` itself
+        let chosen_child =
+            get_next_chain_ancestor_unchecked(self.store, reindex_root, common_ancestor)?;
+        let block_interval = self.store.get_interval(allocation_block)?;
+        let chosen_interval = self.store.get_interval(chosen_child)?;
+
+        if block_interval.start < chosen_interval.start {
+            // `allocation_block` is in the subtree before the chosen child
+            self.reclaim_interval_before(
+                allocation_block,
+                common_ancestor,
+                chosen_child,
+                reindex_root,
+                required_allocation,
+            )
+        } else {
+            // `allocation_block` is in the subtree after the chosen child
+            self.reclaim_interval_after(
+                allocation_block,
+                common_ancestor,
+                chosen_child,
+                reindex_root,
+                required_allocation,
+            )
+        }
+    }
+
+    fn reclaim_interval_before(
+        &mut self,
+        allocation_block: Hash,
+        common_ancestor: Hash,
+        chosen_child: Hash,
+        reindex_root: Hash,
+        required_allocation: u64,
+    ) -> Result<()> {
+        let mut slack_sum = 0u64;
+        let mut path_len = 0u64;
+        let mut path_slack_alloc = 0u64;
+
+        let mut current = chosen_child;
+        // Walk up the chain from common ancestor's chosen child towards reindex root
+        loop {
+            if current == reindex_root {
+                // Reached reindex root.
+                // In this case, since we reached (the unlimited) root,
+                // we also re-allocate new slack for the chain we just traversed
+                let offset = required_allocation
+                    .checked_add(self.slack.checked_mul(path_len).unwrap())
+                    .unwrap()
+                    .checked_sub(slack_sum)
+                    .unwrap();
+                self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?;
+                self.offset_siblings_before(allocation_block, current, offset)?;
+
+                // Set the slack for each chain block to be reserved below during the chain walk-down
+                path_slack_alloc = self.slack;
+                break;
+            }
+
+            let slack_before_current = self.store.interval_remaining_before(current)?.size();
+            slack_sum = slack_sum.checked_add(slack_before_current).unwrap();
+
+            if slack_sum >= required_allocation {
+                // Set offset to be just enough to satisfy required allocation
+                let offset = slack_before_current
+                    .checked_sub(slack_sum.checked_sub(required_allocation).unwrap())
+                    .unwrap();
+                self.apply_interval_op(current, offset, Interval::increase_start)?;
+                self.offset_siblings_before(allocation_block, current, offset)?;
+
+                break;
+            }
+
+            current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?;
+            path_len = path_len.checked_add(1).unwrap();
+        }
+
+        // Go back down the reachability tree towards the common ancestor.
+        // On every hop we reindex the reachability subtree before the
+        // current block with an interval that is smaller.
+        // This is to make room for the required allocation.
+        loop {
+            current = self.store.get_parent(current)?;
+            if current == common_ancestor {
+                break;
+            }
+
+            let slack_before_current = self.store.interval_remaining_before(current)?.size();
+            let offset = slack_before_current.checked_sub(path_slack_alloc).unwrap();
+            self.apply_interval_op(current, offset, Interval::increase_start)?;
+            self.offset_siblings_before(allocation_block, current, offset)?;
+        }
+
+        Ok(())
+    }
+
+    fn reclaim_interval_after(
+        &mut self,
+        allocation_block: Hash,
+        common_ancestor: Hash,
+        chosen_child: Hash,
+        reindex_root: Hash,
+        required_allocation: u64,
+    ) -> Result<()> {
+        let mut slack_sum = 0u64;
+        let mut path_len = 0u64;
+        let mut path_slack_alloc = 0u64;
+
+        let mut current = chosen_child;
+        // Walk up the chain from common ancestor's chosen child towards reindex root
+        loop {
+            if current == reindex_root {
+                // Reached reindex root.
+                // In this case, since we reached (the unlimited) root,
+                // we also re-allocate new slack for the chain we just traversed
+                let offset = required_allocation
+                    .checked_add(self.slack.checked_mul(path_len).unwrap())
+                    .unwrap()
+                    .checked_sub(slack_sum)
+                    .unwrap();
+                self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?;
+                self.offset_siblings_after(allocation_block, current, offset)?;
+
+                // Set the slack for each chain block to be reserved below during the chain walk-down
+                path_slack_alloc = self.slack;
+                break;
+            }
+
+            let slack_after_current = self.store.interval_remaining_after(current)?.size();
+            slack_sum = slack_sum.checked_add(slack_after_current).unwrap();
+
+            if slack_sum >= required_allocation {
+                // Set offset to be just enough to satisfy required allocation
+                let offset = slack_after_current
+                    .checked_sub(slack_sum.checked_sub(required_allocation).unwrap())
+                    .unwrap();
+                self.apply_interval_op(current, offset, Interval::decrease_end)?;
+                self.offset_siblings_after(allocation_block, current, offset)?;
+
+                break;
+            }
+
+            current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?;
+            path_len = path_len.checked_add(1).unwrap();
+        }
+
+        // Go back down the reachability tree towards the common ancestor.
+        // On every hop we reindex the reachability subtree after the
+        // current block with an interval that is smaller.
+        // This is to make room for the required allocation.
+        loop {
+            current = self.store.get_parent(current)?;
+            if current == common_ancestor {
+                break;
+            }
+
+            let slack_after_current = self.store.interval_remaining_after(current)?.size();
+            let offset = slack_after_current.checked_sub(path_slack_alloc).unwrap();
+            self.apply_interval_op(current, offset, Interval::decrease_end)?;
+            self.offset_siblings_after(allocation_block, current, offset)?;
+        }
+
+        Ok(())
+    }
+
+    fn offset_siblings_before(
+        &mut self,
+        allocation_block: Hash,
+        current: Hash,
+        offset: u64,
+    ) -> Result<()> {
+        let parent = self.store.get_parent(current)?;
+        let children = self.store.get_children(parent)?;
+
+        let (siblings_before, _) = split_children(&children, current)?;
+        for sibling in siblings_before.iter().cloned().rev() {
+            if sibling == allocation_block {
+                // We reached our final destination, allocate `offset` to `allocation_block` by increasing end and break
+                self.apply_interval_op_and_propagate(
+                    allocation_block,
+                    offset,
+                    Interval::increase_end,
+                )?;
+                break;
+            }
+            // For non-`allocation_block` siblings offset the interval upwards in order to create space
+            self.apply_interval_op_and_propagate(sibling, offset, Interval::increase)?;
+        }
+
+        Ok(())
+    }
+
+    fn offset_siblings_after(
+        &mut self,
+        allocation_block: Hash,
+        current: Hash,
+        offset: u64,
+    ) -> Result<()> {
+        let parent = self.store.get_parent(current)?;
+        let children = self.store.get_children(parent)?;
+
+        let (_, siblings_after) = split_children(&children, current)?;
+        for sibling in siblings_after.iter().cloned() {
+            if sibling == allocation_block {
+                // We reached our final destination, allocate `offset` to `allocation_block` by decreasing only start and break
+                self.apply_interval_op_and_propagate(
+                    allocation_block,
+                    offset,
+                    Interval::decrease_start,
+                )?;
+                break;
+            }
+            // For siblings before `allocation_block` offset the interval downwards to create space
+            self.apply_interval_op_and_propagate(sibling, offset, Interval::decrease)?;
+        }
+
+        Ok(())
+    }
+
+    fn apply_interval_op(
+        &mut self,
+        block: Hash,
+        offset: u64,
+        op: fn(&Interval, u64) -> Interval,
+    ) -> Result<()> {
+        self.store
+            .set_interval(block, op(&self.store.get_interval(block)?, offset))?;
+        Ok(())
+    }
+
+    fn apply_interval_op_and_propagate(
+        &mut self,
+        block: Hash,
+        offset: u64,
+        op: fn(&Interval, u64) -> Interval,
+    ) -> Result<()> {
+        self.store
+            .set_interval(block, op(&self.store.get_interval(block)?, offset))?;
+        self.propagate_interval(block)?;
+        Ok(())
+    }
+
+    /// A method for handling reindex operations triggered by moving the reindex root
+    pub(super) fn concentrate_interval(
+        &mut self,
+        parent: Hash,
+        child: Hash,
+        is_final_reindex_root: bool,
+    ) -> Result<()> {
+        let children = self.store.get_children(parent)?;
+
+        // Split the `children` of `parent` to siblings before `child` and siblings after `child`
+        let (siblings_before, siblings_after) = split_children(&children, child)?;
+
+        let siblings_before_subtrees_sum: u64 =
+            self.tighten_intervals_before(parent, siblings_before)?;
+        let siblings_after_subtrees_sum: u64 =
+            self.tighten_intervals_after(parent, siblings_after)?;
+
+        self.expand_interval_to_chosen(
+            parent,
+            child,
+            siblings_before_subtrees_sum,
+            siblings_after_subtrees_sum,
+            is_final_reindex_root,
+        )?;
+
+        Ok(())
+    }
+
+    pub(super) fn tighten_intervals_before(
+        &mut self,
+        parent: Hash,
+        children_before: &[Hash],
+    ) -> Result<u64> {
+        let sizes = children_before
+            .iter()
+            .cloned()
+            .map(|block| {
+                self.count_subtrees(block)?;
+                Ok(self.subtree_sizes[&block])
+            })
+            .collect::<Result<Vec<u64>>>()?;
+        let sum = sizes.iter().sum();
+
+        let interval = self.store.get_interval(parent)?;
+        let interval_before = Interval::new(
+            interval.start.checked_add(self.slack).unwrap(),
+            interval
+                .start
+                .checked_add(self.slack)
+                .unwrap()
+                .checked_add(sum)
+                .unwrap()
+                .checked_sub(1)
+                .unwrap(),
+        );
+
+        for (c, ci) in children_before
+            .iter()
+            .cloned()
+            .zip(interval_before.split_exact(sizes.as_slice()))
+        {
+            self.store.set_interval(c, ci)?;
+            self.propagate_interval(c)?;
+        }
+
+        Ok(sum)
+    }
+
+    pub(super) fn tighten_intervals_after(
+        &mut self,
+        parent: Hash,
+        children_after: &[Hash],
+    ) -> Result<u64> {
+        let sizes = children_after
+            .iter()
+            .cloned()
+            .map(|block| {
+                self.count_subtrees(block)?;
+                Ok(self.subtree_sizes[&block])
+            })
+            .collect::<Result<Vec<u64>>>()?;
+        let sum = sizes.iter().sum();
+
+        let interval = self.store.get_interval(parent)?;
+        let interval_after = Interval::new(
+            interval
+                .end
+                .checked_sub(self.slack)
+                .unwrap()
+                .checked_sub(sum)
+                .unwrap(),
+            interval
+                .end
+                .checked_sub(self.slack)
+                .unwrap()
+                .checked_sub(1)
+                .unwrap(),
+        );
+
+        for (c, ci) in children_after
+            .iter()
+            .cloned()
+            .zip(interval_after.split_exact(sizes.as_slice()))
+        {
+            self.store.set_interval(c, ci)?;
+            self.propagate_interval(c)?;
+        }
+
+        Ok(sum)
+    }
+
+    pub(super) fn expand_interval_to_chosen(
+        &mut self,
+        parent: Hash,
+        child: Hash,
+        siblings_before_subtrees_sum: u64,
+        siblings_after_subtrees_sum: u64,
+        is_final_reindex_root: bool,
+    ) -> Result<()> {
+        let interval = self.store.get_interval(parent)?;
+        let allocation = Interval::new(
+            interval
+                .start
+                .checked_add(siblings_before_subtrees_sum)
+                .unwrap()
+                .checked_add(self.slack)
+                .unwrap(),
+            interval
+                .end
+                .checked_sub(siblings_after_subtrees_sum)
+                .unwrap()
+                .checked_sub(self.slack)
+                .unwrap()
+                .checked_sub(1)
+                .unwrap(),
+        );
+        let current = self.store.get_interval(child)?;
+
+        // Propagate interval only if the chosen `child` is the final reindex root AND
+        // the new interval doesn't contain the previous one
+        if is_final_reindex_root && !allocation.contains(current) {
+            /*
+            We deallocate slack on both sides as an optimization. Were we to
+            assign the fully allocated interval, the next time the reindex root moves we
+            would need to propagate intervals again. However when we do allocate slack,
+            next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`.
+            Note that below following the propagation we reassign the full `allocation` to `child`.
+            */
+            let narrowed = Interval::new(
+                allocation.start.checked_add(self.slack).unwrap(),
+                allocation.end.checked_sub(self.slack).unwrap(),
+            );
+            self.store.set_interval(child, narrowed)?;
+            self.propagate_interval(child)?;
+        }
+
+        self.store.set_interval(child, allocation)?;
+        Ok(())
+    }
+}
+
+/// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after.
+fn split_children(children: &std::sync::Arc<Vec<Hash>>, pivot: Hash) -> Result<(&[Hash], &[Hash])> {
+    if let Some(index) = children.iter().cloned().position(|c| c == pivot) {
+        Ok((
+            &children[..index],
+            &children[index.checked_add(1).unwrap()..],
+        ))
+    } else {
+        Err(ReachabilityError::DataInconsistency)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{super::tests::*, *};
+    use crate::consensusdb::schemadb::{MemoryReachabilityStore, ReachabilityStoreReader};
+    use crate::dag::types::interval::Interval;
+    use starcoin_types::blockhash;
+
+    #[test]
+    fn test_count_subtrees() {
+        let mut store = MemoryReachabilityStore::new();
+
+        // Arrange
+        let root: Hash = 1.into();
+        StoreBuilder::new(&mut store)
+            .add_block(root, Hash::new(blockhash::NONE))
+            .add_block(2.into(), root)
+            .add_block(3.into(), 2.into())
+            .add_block(4.into(), 2.into())
+            .add_block(5.into(), 3.into())
+            .add_block(6.into(), 5.into())
+            .add_block(7.into(), 1.into())
+            .add_block(8.into(), 6.into());
+
+        // Act
+        let mut ctx = ReindexOperationContext::new(&mut store, 10, 16);
+        ctx.count_subtrees(root).unwrap();
+
+        // Assert
+        let expected = [
+            (1u64, 8u64),
+            (2, 6),
+            (3, 4),
+            (4, 1),
+            (5, 3),
+            (6, 2),
+            (7, 1),
+            (8, 1),
+        ]
+        .iter()
+        .cloned()
+        .map(|(h, c)| (Hash::from(h), c))
+        .collect::<BlockHashMap<u64>>();
+
+        assert_eq!(expected, ctx.subtree_sizes);
+
+        // Act
+        ctx.store.set_interval(root, Interval::new(1, 8)).unwrap();
+        ctx.propagate_interval(root).unwrap();
+
+        // Assert intervals manually
+        let expected_intervals = [
+            (1u64, (1u64, 8u64)),
+            (2, (1, 6)),
+            (3, (1, 4)),
+            (4, (5, 5)),
+            (5, (1, 3)),
+            (6, (1, 2)),
+            (7, (7, 7)),
+            (8, (1, 1)),
+        ];
+        let actual_intervals = (1u64..=8)
+            .map(|i| (i, ctx.store.get_interval(i.into()).unwrap().into()))
+            .collect::<Vec<(u64, (u64, u64))>>();
+        assert_eq!(actual_intervals, expected_intervals);
+
+        // Assert intervals follow the general rules
+        store.validate_intervals(root).unwrap();
+    }
+}
diff --git a/consensus/src/dag/reachability/relations_service.rs b/consensus/src/dag/reachability/relations_service.rs
new file mode 100644
index 0000000000..755cfb49be
--- /dev/null
+++ b/consensus/src/dag/reachability/relations_service.rs
@@ -0,0 +1,34 @@
+use crate::consensusdb::{prelude::StoreError, schemadb::RelationsStoreReader};
+use parking_lot::RwLock;
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::BlockHashes;
+use std::sync::Arc;
+/// Multi-threaded block-relations service implementation
+#[derive(Clone)]
+pub struct MTRelationsService<T: RelationsStoreReader> {
+    store: Arc<RwLock<Vec<T>>>,
+    level: usize,
+}
+
+impl<T: RelationsStoreReader> MTRelationsService<T> {
+    pub fn new(store: Arc<RwLock<Vec<T>>>, level: u8) -> Self {
+        Self {
+            store,
+            level: level as usize,
+        }
+    }
+}
+
+impl<T: RelationsStoreReader> RelationsStoreReader for MTRelationsService<T> {
+    fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        self.store.read()[self.level].get_parents(hash)
+    }
+
+    fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
+        self.store.read()[self.level].get_children(hash)
+    }
+
+    fn has(&self, hash: Hash) -> Result<bool, StoreError> {
+        self.store.read()[self.level].has(hash)
+    }
+}
diff --git a/consensus/src/dag/reachability/tests.rs b/consensus/src/dag/reachability/tests.rs
new file mode 100644
index 0000000000..e9fa593c86
--- /dev/null
+++ b/consensus/src/dag/reachability/tests.rs
@@ -0,0 +1,264 @@
+//!
+//! Test utils for reachability
+//!
+use super::{inquirer::*, tree::*};
+use crate::consensusdb::{
+    prelude::StoreError,
+    schemadb::{ReachabilityStore, ReachabilityStoreReader},
+};
+use crate::dag::types::{interval::Interval, perf};
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet};
+use std::collections::VecDeque;
+use thiserror::Error;
+
+/// A struct with fluent API to streamline reachability store building
+pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> {
+    store: &'a mut T,
+}
+
+impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> {
+    pub fn new(store: &'a mut T) -> Self {
+        Self { store }
+    }
+
+    pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self {
+        let parent_height = if !parent.is_none() {
+            self.store.append_child(parent, hash).unwrap()
+        } else {
+            0
+        };
+        self.store
+            .insert(hash, parent, Interval::empty(), parent_height + 1)
+            .unwrap();
+        self
+    }
+}
+
+/// A struct with fluent API to streamline tree building
+pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> {
+    store: &'a mut T,
+    reindex_depth: u64,
+    reindex_slack: u64,
+}
+
+impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> {
+    pub fn new(store: &'a mut T) -> Self {
+        Self {
+            store,
+            reindex_depth: perf::DEFAULT_REINDEX_DEPTH,
+            reindex_slack: perf::DEFAULT_REINDEX_SLACK,
+        }
+    }
+
+    pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self {
+        Self {
+            store,
+            reindex_depth,
+            reindex_slack,
+        }
+    }
+
+    pub fn init(&mut self) -> &mut Self {
+        init(self.store).unwrap();
+        self
+    }
+
+    pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self {
+        init_with_params(self.store, origin, capacity).unwrap();
+        self
+    }
+
+    pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self {
+        add_tree_block(
+            self.store,
+            hash,
+            parent,
+            self.reindex_depth,
+            self.reindex_slack,
+        )
+        .unwrap();
+        try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack)
+            .unwrap();
+        self
+    }
+
+    pub fn store(&self) -> &&'a mut T {
+        &self.store
+    }
+}
+
+#[derive(Clone)]
+pub struct DagBlock {
+    pub hash: Hash,
+    pub parents: Vec<Hash>,
+}
+
+impl DagBlock {
+    pub fn new(hash: Hash, parents: Vec<Hash>) -> Self {
+        Self { hash, parents }
+    }
+}
+
+/// A struct with fluent API to streamline DAG building
+pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized> {
+    store: &'a mut T,
+    map: BlockHashMap<DagBlock>,
+}
+
+impl<'a, T: ReachabilityStore + ?Sized> DagBuilder<'a, T> {
+    pub fn new(store: &'a mut T) -> Self {
+        Self {
+            store,
+            map: BlockHashMap::new(),
+        }
+    }
+
+    pub fn init(&mut self) -> &mut Self {
+        init(self.store).unwrap();
+        self
+    }
+
+    pub fn add_block(&mut self, block: DagBlock) -> &mut Self {
+        // Select by height (longest chain) just for the sake of internal isolated tests
+        let selected_parent = block
+            .parents
+            .iter()
+            .cloned()
+            .max_by_key(|p| self.store.get_height(*p).unwrap())
+            .unwrap();
+        let mergeset = self.mergeset(&block, selected_parent);
+        add_block(
+            self.store,
+            block.hash,
+            selected_parent,
+            &mut mergeset.iter().cloned(),
+        )
+        .unwrap();
+        hint_virtual_selected_parent(self.store, block.hash).unwrap();
+        self.map.insert(block.hash, block);
+        self
+    }
+
+    fn mergeset(&self, block: &DagBlock, selected_parent: Hash) -> Vec<Hash> {
+        let mut queue: VecDeque<Hash> = block
+            .parents
+            .iter()
+            .copied()
+            .filter(|p| *p != selected_parent)
+            .collect();
+        let mut mergeset: BlockHashSet = queue.iter().copied().collect();
+        let mut past = BlockHashSet::new();
+
+        while let Some(current) = queue.pop_front() {
+            for parent in self.map[&current].parents.iter() {
+                if mergeset.contains(parent) || past.contains(parent) {
+                    continue;
+                }
+
+                if is_dag_ancestor_of(self.store, *parent, selected_parent).unwrap() {
+                    past.insert(*parent);
+                    continue;
+                }
+
+                mergeset.insert(*parent);
+                queue.push_back(*parent);
+            }
+        }
+        mergeset.into_iter().collect()
+    }
+
+    pub fn store(&self) -> &&'a mut T {
+        &self.store
+    }
+}
+
+#[derive(Error, Debug)]
+pub enum TestError {
+    #[error("data store error")]
+    StoreError(#[from] StoreError),
+
+    #[error("empty interval")]
+    EmptyInterval(Hash, Interval),
+
+    #[error("sibling intervals are expected to be consecutive")]
+    NonConsecutiveSiblingIntervals(Interval, Interval),
+
+    #[error("child interval out of parent bounds")]
+    IntervalOutOfParentBounds {
+        parent: Hash,
+        child: Hash,
+        parent_interval: Interval,
+        child_interval: Interval,
+    },
+}
+
+pub trait StoreValidationExtensions {
+    /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers)
+    fn in_past_of(&self, block: u64, other: u64) -> bool;
+
+    /// Checks if `block` and `other` are in the anticone of each other
+    /// (creates hashes from the u64 numbers)
+    fn are_anticone(&self, block: u64, other: u64) -> bool;
+
+    /// Validates that all tree intervals match the expected interval relations
+    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>;
+}
+
+impl<T: ReachabilityStoreReader + ?Sized> StoreValidationExtensions for T {
+    fn in_past_of(&self, block: u64, other: u64) -> bool {
+        if block == other {
+            return false;
+        }
+        let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap();
+        if res {
+            // Assert that the `future` relation is indeed asymmetric
+            assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap())
+        }
+        res
+    }
+
+    fn are_anticone(&self, block: u64, other: u64) -> bool {
+        !is_dag_ancestor_of(self, block.into(), other.into()).unwrap()
+            && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap()
+    }
+
+    fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> {
+        let mut queue = VecDeque::<Hash>::from([root]);
+        while let Some(parent) = queue.pop_front() {
+            let children = self.get_children(parent)?;
+            queue.extend(children.iter());
+
+            let parent_interval = self.get_interval(parent)?;
+            if parent_interval.is_empty() {
+                return Err(TestError::EmptyInterval(parent, parent_interval));
+            }
+
+            // Verify parent-child strict relation
+            for child in children.iter().cloned() {
+                let child_interval = self.get_interval(child)?;
+                if !parent_interval.strictly_contains(child_interval) {
+                    return Err(TestError::IntervalOutOfParentBounds {
+                        parent,
+                        child,
+                        parent_interval,
+                        child_interval,
+                    });
+                }
+            }
+
+            // Iterate over consecutive siblings
+            for siblings in children.windows(2) {
+                let sibling_interval = self.get_interval(siblings[0])?;
+                let current_interval = self.get_interval(siblings[1])?;
+                if sibling_interval.end + 1 != current_interval.start {
+                    return Err(TestError::NonConsecutiveSiblingIntervals(
+                        sibling_interval,
+                        current_interval,
+                    ));
+                }
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/consensus/src/dag/reachability/tree.rs b/consensus/src/dag/reachability/tree.rs
new file mode 100644
index 0000000000..a0d98a9b23
--- /dev/null
+++ b/consensus/src/dag/reachability/tree.rs
@@ -0,0 +1,161 @@
+//!
+//! Tree-related functions internal to the module
+//!
+use super::{
+    extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext,
+    *,
+};
+use crate::consensusdb::schemadb::ReachabilityStore;
+use starcoin_crypto::HashValue as Hash;
+
+/// Adds `new_block` as a child of `parent` in the tree structure. If this block
+/// has no remaining interval to allocate, a reindexing is triggered. When a reindexing
+/// is triggered, the reindex root point is used within the reindex algorithm's logic
+pub fn add_tree_block(
+    store: &mut (impl ReachabilityStore + ?Sized),
+    new_block: Hash,
+    parent: Hash,
+    reindex_depth: u64,
+    reindex_slack: u64,
+) -> Result<()> {
+    // Get the remaining interval capacity
+    let remaining = store.interval_remaining_after(parent)?;
+    // Append the new child to `parent.children`
+    let parent_height = store.append_child(parent, new_block)?;
+    if remaining.is_empty() {
+        // Init with the empty interval.
+        // Note: internal logic relies on interval being this specific interval
+        //       which comes exactly at the end of current capacity
+        store.insert(
+            new_block,
+            parent,
+            remaining,
+            parent_height.checked_add(1).unwrap(),
+        )?;
+
+        // Start a reindex operation (TODO: add timing)
+        let reindex_root = store.get_reindex_root()?;
+        let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack);
+        ctx.reindex_intervals(new_block, reindex_root)?;
+    } else {
+        let allocated = remaining.split_half().0;
+        store.insert(
+            new_block,
+            parent,
+            allocated,
+            parent_height.checked_add(1).unwrap(),
+        )?;
+    };
+    Ok(())
+}
+
+/// Finds the most recent tree ancestor common to both `block` and the given `reindex root`.
+/// Note that we assume that almost always the chain between the reindex root and the common
+/// ancestor is longer than the chain between block and the common ancestor, hence we iterate
+/// from `block`.
+pub fn find_common_tree_ancestor(
+    store: &(impl ReachabilityStore + ?Sized),
+    block: Hash,
+    reindex_root: Hash,
+) -> Result<Hash> {
+    let mut current = block;
+    loop {
+        if is_chain_ancestor_of(store, current, reindex_root)? {
+            return Ok(current);
+        }
+        current = store.get_parent(current)?;
+    }
+}
+
+/// Finds a possible new reindex root, based on the `current` reindex root and the selected tip `hint`
+pub fn find_next_reindex_root(
+    store: &(impl ReachabilityStore + ?Sized),
+    current: Hash,
+    hint: Hash,
+    reindex_depth: u64,
+    reindex_slack: u64,
+) -> Result<(Hash, Hash)> {
+    let mut ancestor = current;
+    let mut next = current;
+
+    let hint_height = store.get_height(hint)?;
+
+    // Test if current root is ancestor of selected tip (`hint`) - if not, this is a reorg case
+    if !is_chain_ancestor_of(store, current, hint)? {
+        let current_height = store.get_height(current)?;
+
+        // We have reindex root out of (hint) selected tip chain, however we switch chains only after a sufficient
+        // threshold of `reindex_slack` diff in order to address possible alternating reorg attacks.
+        // The `reindex_slack` constant is used as a heuristic large enough on the one hand, but
+        // one which will not harm performance on the other hand - given the available slack at the chain split point.
+        //
+        // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height.
+        // If that's the case we keep the reindex root unchanged.
+        if hint_height < current_height
+            || hint_height.checked_sub(current_height).unwrap() < reindex_slack
+        {
+            return Ok((current, current));
+        }
+
+        let common = find_common_tree_ancestor(store, hint, current)?;
+        ancestor = common;
+        next = common;
+    }
+
+    // Iterate from ancestor towards the selected tip (`hint`) until passing the
+    // `reindex_window` threshold, for finding the new reindex root
+    loop {
+        let child = get_next_chain_ancestor_unchecked(store, hint, next)?;
+        let child_height = store.get_height(child)?;
+
+        if hint_height < child_height {
+            return Err(ReachabilityError::DataInconsistency);
+        }
+        if hint_height.checked_sub(child_height).unwrap() < reindex_depth {
+            break;
+        }
+        next = child;
+    }
+
+    Ok((ancestor, next))
+}
+
+/// Attempts to advance or move the current reindex root according to the
+/// provided `virtual selected parent` (`VSP`) hint.
+/// It is important for the reindex root point to follow the consensus-agreed chain
+/// since this way it can benefit from chain-robustness which is implied by the security
+/// of the ordering protocol. That is, it benefits from the fact that all future blocks are
+/// expected to elect the root subtree (by converging to the agreement to have it on the
+/// selected chain). See also the reachability algorithms overview (TODO)
+pub fn try_advancing_reindex_root(
+    store: &mut (impl ReachabilityStore + ?Sized),
+    hint: Hash,
+    reindex_depth: u64,
+    reindex_slack: u64,
+) -> Result<()> {
+    // Get current root from the store
+    let current = store.get_reindex_root()?;
+
+    // Find the possible new root
+    let (mut ancestor, next) =
+        find_next_reindex_root(store, current, hint, reindex_depth, reindex_slack)?;
+
+    // No update to root, return
+    if current == next {
+        return Ok(());
+    }
+
+    // if ancestor == next {
+    //     trace!("next reindex root is an ancestor of current one, skipping concentration.")
+    // }
+    while ancestor != next {
+        let child = get_next_chain_ancestor_unchecked(store, next, ancestor)?;
+        let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack);
+        ctx.concentrate_interval(ancestor, child, child == next)?;
+        ancestor = child;
+    }
+
+    // Update reindex root in the data store
+    store.set_reindex_root(next)?;
+    Ok(())
+}
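An illustrative-only model (not from the diff) of the advancement policy implemented by `find_next_reindex_root` above: the root trails the selected tip by `reindex_depth` blocks, and after a reorg the root only hops to the new chain once the tip leads by at least `reindex_slack`. Heights stand in for blocks here; the function name is hypothetical.

```rust
// Returns whether the reindex root may switch to the (reorged) hint chain.
fn should_switch_chain(current_root_height: u64, hint_height: u64, reindex_slack: u64) -> bool {
    hint_height >= current_root_height
        && hint_height - current_root_height >= reindex_slack
}

fn main() {
    let slack = 1u64 << 12;
    // A shallow alternating reorg does not move the root...
    assert!(!should_switch_chain(100_000, 100_010, slack));
    // ...but a sufficiently deep lead does.
    assert!(should_switch_chain(100_000, 100_000 + slack, slack));
}
```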
diff --git a/consensus/src/dag/types/ghostdata.rs b/consensus/src/dag/types/ghostdata.rs
new file mode 100644
index 0000000000..c680172148
--- /dev/null
+++ b/consensus/src/dag/types/ghostdata.rs
@@ -0,0 +1,147 @@
+use super::trusted::ExternalGhostdagData;
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType};
+use std::sync::Arc;
+
+#[derive(Clone, Serialize, Deserialize, Default, Debug)]
+pub struct GhostdagData {
+    pub blue_score: u64,
+    pub blue_work: BlueWorkType,
+    pub selected_parent: Hash,
+    pub mergeset_blues: BlockHashes,
+    pub mergeset_reds: BlockHashes,
+    pub blues_anticone_sizes: HashKTypeMap,
+}
+
+#[derive(Clone, Debug, Default, Serialize, Deserialize, Copy)]
+pub struct CompactGhostdagData {
+    pub blue_score: u64,
+    pub blue_work: BlueWorkType,
+    pub selected_parent: Hash,
+}
+
+impl From<ExternalGhostdagData> for GhostdagData {
+    fn from(value: ExternalGhostdagData) -> Self {
+        Self {
+            blue_score: value.blue_score,
+            blue_work: value.blue_work,
+            selected_parent: value.selected_parent,
+            mergeset_blues: Arc::new(value.mergeset_blues),
+            mergeset_reds: Arc::new(value.mergeset_reds),
+            blues_anticone_sizes: Arc::new(value.blues_anticone_sizes),
+        }
+    }
+}
+
+impl From<&GhostdagData> for ExternalGhostdagData {
+    fn from(value: &GhostdagData) -> Self {
+        Self {
+            blue_score: value.blue_score,
+            blue_work: value.blue_work,
+            selected_parent: value.selected_parent,
+            mergeset_blues: (*value.mergeset_blues).clone(),
+            mergeset_reds: (*value.mergeset_reds).clone(),
+            blues_anticone_sizes: (*value.blues_anticone_sizes).clone(),
+        }
+    }
+}
+
+impl GhostdagData {
+    pub fn new(
+        blue_score: u64,
+        blue_work: BlueWorkType,
+        selected_parent: Hash,
+        mergeset_blues: BlockHashes,
+        mergeset_reds: BlockHashes,
+        blues_anticone_sizes: HashKTypeMap,
+    ) -> Self {
+        Self {
+            blue_score,
+            blue_work,
+            selected_parent,
+            mergeset_blues,
+            mergeset_reds,
+            blues_anticone_sizes,
+        }
+    }
+
+    pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self {
+        let mut mergeset_blues: Vec<Hash> = Vec::with_capacity(k.checked_add(1).unwrap() as usize);
+        let mut blues_anticone_sizes: BlockHashMap<KType> = BlockHashMap::with_capacity(k as usize);
+        mergeset_blues.push(selected_parent);
+        blues_anticone_sizes.insert(selected_parent, 0);
+
+        Self {
+            blue_score: Default::default(),
+            blue_work: Default::default(),
+            selected_parent,
+            mergeset_blues: BlockHashes::new(mergeset_blues),
+            mergeset_reds: Default::default(),
+            blues_anticone_sizes: HashKTypeMap::new(blues_anticone_sizes),
+        }
+    }
+
+    pub fn mergeset_size(&self) -> usize {
+        self.mergeset_blues
+            .len()
+            .checked_add(self.mergeset_reds.len())
+            .unwrap()
+    }
+
+    /// Returns an iterator to the mergeset with no specified order (excluding the selected parent)
+    pub fn unordered_mergeset_without_selected_parent(&self) -> impl Iterator<Item = Hash> + '_ {
+        self.mergeset_blues
+            .iter()
+            .skip(1) // Skip the selected parent
+            .cloned()
+            .chain(self.mergeset_reds.iter().cloned())
+    }
+
+    /// Returns an iterator to the mergeset with no specified order (including the selected parent)
+    pub fn unordered_mergeset(&self) -> impl Iterator<Item = Hash> + '_ {
+        self.mergeset_blues
+            .iter()
+            .cloned()
+            .chain(self.mergeset_reds.iter().cloned())
+    }
+
+    pub fn to_compact(&self) -> CompactGhostdagData {
+        CompactGhostdagData {
+            blue_score: self.blue_score,
+            blue_work: self.blue_work,
+            selected_parent: self.selected_parent,
+        }
+    }
+
+    pub fn add_blue(
+        &mut self,
+        block: Hash,
+        blue_anticone_size: KType,
+        block_blues_anticone_sizes: &BlockHashMap<KType>,
+    ) {
+        // Add the new blue block to mergeset blues
+        BlockHashes::make_mut(&mut self.mergeset_blues).push(block);
+
+        // Get a mut ref to internal anticone size map
+        let blues_anticone_sizes = HashKTypeMap::make_mut(&mut self.blues_anticone_sizes);
+
+        // Insert the new blue block with its blue anticone size to the map
+        blues_anticone_sizes.insert(block, blue_anticone_size);
+
+        // Insert/update map entries for blocks affected by this insertion
+        for (blue, size) in block_blues_anticone_sizes {
+            blues_anticone_sizes.insert(*blue, size.checked_add(1).unwrap());
+        }
+    }
+
+    pub fn add_red(&mut self, block: Hash) {
+        // Add the new red block to mergeset reds
+        BlockHashes::make_mut(&mut self.mergeset_reds).push(block);
+    }
+
+    pub fn finalize_score_and_work(&mut self, blue_score: u64, blue_work: BlueWorkType) {
+        self.blue_score = blue_score;
+        self.blue_work = blue_work;
+    }
+}
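A hedged sketch (not from the diff) of how `GhostdagData` is assembled for a block: start from the selected parent, classify mergeset blocks as blue or red, then finalize the accumulated score and work. The hashes, the `k` value, and the final score below are made up for illustration.

```rust
use starcoin_types::blockhash::BlockHashMap;

fn example() -> GhostdagData {
    let k = 16; // assumed GHOSTDAG security parameter
    let mut data = GhostdagData::new_with_selected_parent(Hash::from_u64(1), k);
    // A block that passed the k-cluster check joins the blues; the empty map
    // means no previously-blue block sees its anticone size change.
    data.add_blue(Hash::from_u64(2), 0, &BlockHashMap::new());
    // A block that violated the k-cluster condition is marked red.
    data.add_red(Hash::from_u64(3));
    // Finalize with illustrative values (the real score/work are computed
    // from the selected parent's data plus the mergeset).
    let work = data.blue_work.clone();
    data.finalize_score_and_work(2, work);
    data
}
```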
diff --git a/consensus/src/dag/types/interval.rs b/consensus/src/dag/types/interval.rs
new file mode 100644
index 0000000000..0b5cc4f6e5
--- /dev/null
+++ b/consensus/src/dag/types/interval.rs
@@ -0,0 +1,377 @@
+use serde::{Deserialize, Serialize};
+use std::fmt::{Display, Formatter};
+
+#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
+pub struct Interval {
+    pub start: u64,
+    pub end: u64,
+}
+
+impl Display for Interval {
+    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
+        write!(f, "[{}, {}]", self.start, self.end)
+    }
+}
+
+impl From<Interval> for (u64, u64) {
+    fn from(val: Interval) -> Self {
+        (val.start, val.end)
+    }
+}
+
+impl Interval {
+    pub fn new(start: u64, end: u64) -> Self {
+        debug_assert!(start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap()); // TODO: make sure this is actually debug-only
+        Interval { start, end }
+    }
+
+    pub fn empty() -> Self {
+        Self::new(1, 0)
+    }
+
+    /// Returns the maximally allowed `u64` interval. We leave a margin of 1 from
+    /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any
+    /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1`
+    pub fn maximal() -> Self {
+        Self::new(1, u64::MAX.saturating_sub(1))
+    }
+
+    pub fn size(&self) -> u64 {
+        // Empty intervals are indicated by `self.end == self.start - 1`, so
+        // we avoid the overflow by first adding 1
+        // Note: this function will panic if `self.end < self.start - 1` due to overflow
+        (self.end.checked_add(1).unwrap())
+            .checked_sub(self.start)
+            .unwrap()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.size() == 0
+    }
+
+    pub fn increase(&self, offset: u64) -> Self {
+        Self::new(
+            self.start.checked_add(offset).unwrap(),
+            self.end.checked_add(offset).unwrap(),
+        )
+    }
+
+    pub fn decrease(&self, offset: u64) -> Self {
+        Self::new(
+            self.start.checked_sub(offset).unwrap(),
+            self.end.checked_sub(offset).unwrap(),
+        )
+    }
+
+    pub fn increase_start(&self, offset: u64) -> Self {
+        Self::new(self.start.checked_add(offset).unwrap(), self.end)
+    }
+
+    pub fn decrease_start(&self, offset: u64) -> Self {
+        Self::new(self.start.checked_sub(offset).unwrap(), self.end)
+    }
+
+    pub fn increase_end(&self, offset: u64) -> Self {
+        Self::new(self.start, self.end.checked_add(offset).unwrap())
+    }
+
+    pub fn decrease_end(&self, offset: u64) -> Self {
+        Self::new(self.start, self.end.checked_sub(offset).unwrap())
+    }
+
+    pub fn split_half(&self) -> (Self, Self) {
+        self.split_fraction(0.5)
+    }
+
+    /// Splits this interval to two parts such that their
+    /// union is equal to the original interval and the first (left) part
+    /// contains the given fraction of the original interval's size.
+    /// Note: if the split results in fractional parts, this method rounds
+    /// the first part up and the last part down.
+    fn split_fraction(&self, fraction: f32) -> (Self, Self) {
+        let left_size = f32::ceil(self.size() as f32 * fraction) as u64;
+
+        (
+            Self::new(
+                self.start,
+                self.start
+                    .checked_add(left_size)
+                    .unwrap()
+                    .checked_sub(1)
+                    .unwrap(),
+            ),
+            Self::new(self.start.checked_add(left_size).unwrap(), self.end),
+        )
+    }
+
+    /// Splits this interval to exactly |sizes| parts where
+    /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly
+    /// equal to the interval's size.
+    pub fn split_exact(&self, sizes: &[u64]) -> Vec<Interval> {
+        assert_eq!(
+            sizes.iter().sum::<u64>(),
+            self.size(),
+            "sum of sizes must be equal to the interval's size"
+        );
+        let mut start = self.start;
+        sizes
+            .iter()
+            .map(|size| {
+                let interval = Self::new(
+                    start,
+                    start.checked_add(*size).unwrap().checked_sub(1).unwrap(),
+                );
+                start = start.checked_add(*size).unwrap();
+                interval
+            })
+            .collect()
+    }
+
+    /// Splits this interval to |sizes| parts
+    /// by the allocation rule described below. This method expects sum(sizes)
+    /// to be smaller or equal to the interval's size. Every part_i is
+    /// allocated at least sizes[i] capacity. The remaining budget is
+    /// split by an exponentially biased rule described below.
+    ///
+    /// This rule follows the GHOSTDAG protocol behavior where the child
+    /// with the largest subtree is expected to dominate the competition
+    /// for new blocks and thus grow the most. However, we may need to
+    /// add slack for non-largest subtrees in order to make CPU reindexing
+    /// attacks unworthy.
+    pub fn split_exponential(&self, sizes: &[u64]) -> Vec<Interval> {
+        let interval_size = self.size();
+        let sizes_sum = sizes.iter().sum::<u64>();
+        assert!(
+            interval_size >= sizes_sum,
+            "interval's size must be greater than or equal to sum of sizes"
+        );
+        assert!(sizes_sum > 0, "cannot split to 0 parts");
+        if interval_size == sizes_sum {
+            return self.split_exact(sizes);
+        }
+
+        //
+        // Add a fractional bias to every size in the provided sizes
+        //
+
+        let mut remaining_bias = interval_size.checked_sub(sizes_sum).unwrap();
+        let total_bias = remaining_bias as f64;
+
+        let mut biased_sizes = Vec::<u64>::with_capacity(sizes.len());
+        let exp_fractions = exponential_fractions(sizes);
+        for (i, fraction) in exp_fractions.iter().enumerate() {
+            let bias: u64 = if i == exp_fractions.len().checked_sub(1).unwrap() {
+                remaining_bias
+            } else {
+                remaining_bias.min(f64::round(total_bias * fraction) as u64)
+            };
+            biased_sizes.push(sizes[i].checked_add(bias).unwrap());
+            remaining_bias = remaining_bias.checked_sub(bias).unwrap();
+        }
+
+        self.split_exact(biased_sizes.as_slice())
+    }
+
+    pub fn contains(&self, other: Self) -> bool {
+        self.start <= other.start && other.end <= self.end
+    }
+
+    pub fn strictly_contains(&self, other: Self) -> bool {
+        self.start <= other.start && other.end < self.end
+    }
+}
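A worked example of the two allocation rules above (values mirror the unit tests later in this file): `split_exact` hands out exactly the requested sizes, while `split_exponential` honors each requested minimum and then biases the leftover capacity exponentially toward the largest subtree.

```rust
fn main() {
    // Exact split: sizes must sum to the interval's size.
    let exact = Interval::new(1, 50).split_exact(&[5, 10, 15, 20]);
    assert_eq!(exact[0], Interval::new(1, 5));
    assert_eq!(exact[3], Interval::new(31, 50));

    // Exponential split: 10_000 slots for subtree sizes [10, 10, 20].
    // Every part gets at least its size, and nearly all of the slack
    // flows to the largest (last) subtree.
    let biased = Interval::new(1, 10_000).split_exponential(&[10, 10, 20]);
    assert_eq!(
        biased,
        vec![
            Interval::new(1, 20),
            Interval::new(21, 40),
            Interval::new(41, 10_000),
        ]
    );
}
```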
+/// Returns a fraction for each size in sizes
+/// as follows: fraction[i] = 2^size[i] / sum_j(2^size[j])
+/// In the code below the above equation is divided by 2^max(size)
+/// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i])
+/// we divide 1 by potentially a very large number, which will
+/// result in loss of float precision. This is not a problem - all
+/// numbers close to 0 bear effectively the same weight.
+fn exponential_fractions(sizes: &[u64]) -> Vec<f64> {
+    let max_size = sizes.iter().copied().max().unwrap_or_default();
+
+    let mut fractions = sizes
+        .iter()
+        .map(|s| 1f64 / 2f64.powf((max_size - s) as f64))
+        .collect::<Vec<f64>>();
+
+    let fractions_sum = fractions.iter().sum::<f64>();
+    for item in &mut fractions {
+        *item /= fractions_sum;
+    }
+
+    fractions
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_interval_basics() {
+        let interval = Interval::new(101, 164);
+        let increased = interval.increase(10);
+        let decreased = increased.decrease(5);
+        // println!("{}", interval.clone());
+
+        assert_eq!(interval.start + 10, increased.start);
+        assert_eq!(interval.end + 10, increased.end);
+
+        assert_eq!(interval.start + 5, decreased.start);
+        assert_eq!(interval.end + 5, decreased.end);
+
+        assert_eq!(interval.size(), 64);
+        assert_eq!(Interval::maximal().size(), u64::MAX - 1);
+        assert_eq!(Interval::empty().size(), 0);
+
+        let (empty_left, empty_right) = Interval::empty().split_half();
+        assert_eq!(empty_left.size(), 0);
+        assert_eq!(empty_right.size(), 0);
+
+        assert_eq!(interval.start + 10, interval.increase_start(10).start);
+        assert_eq!(interval.start - 10, interval.decrease_start(10).start);
+        assert_eq!(interval.end + 10, interval.increase_end(10).end);
+        assert_eq!(interval.end - 10, interval.decrease_end(10).end);
+
+        assert_eq!(interval.end, interval.increase_start(10).end);
+        assert_eq!(interval.end, interval.decrease_start(10).end);
+        assert_eq!(interval.start, interval.increase_end(10).start);
+        assert_eq!(interval.start, interval.decrease_end(10).start);
+
+        // println!("{:?}", Interval::maximal());
+        // println!("{:?}", Interval::maximal().split_half());
+    }
+
+    #[test]
+    fn test_split_exact() {
+        let sizes = vec![5u64, 10, 15, 20];
+        let intervals = Interval::new(1, 50).split_exact(sizes.as_slice());
+        assert_eq!(intervals.len(), sizes.len());
+        for i in 0..sizes.len() {
+            assert_eq!(intervals[i].size(), sizes[i])
+        }
+    }
+
+    #[test]
+    fn test_exponential_fractions() {
+        let mut exp_fractions = exponential_fractions(vec![2, 4, 8, 16].as_slice());
+        // println!("{:?}", exp_fractions);
+        for i in 0..exp_fractions.len() - 1 {
+            assert!(exp_fractions[i + 1] > exp_fractions[i]);
+        }
+
+        exp_fractions = exponential_fractions(vec![].as_slice());
+        assert_eq!(exp_fractions.len(), 0);
+
+        exp_fractions = exponential_fractions(vec![0, 0].as_slice());
+        assert_eq!(exp_fractions.len(), 2);
+        assert_eq!(0.5f64, exp_fractions[0]);
+        assert_eq!(exp_fractions[0], exp_fractions[1]);
+    }
+
+    #[test]
+    fn test_contains() {
+        assert!(Interval::new(1, 100).contains(Interval::new(1, 100)));
+        assert!(Interval::new(1, 100).contains(Interval::new(1, 99)));
+        assert!(Interval::new(1, 100).contains(Interval::new(2, 100)));
+        assert!(Interval::new(1, 100).contains(Interval::new(2, 99)));
+        assert!(!Interval::new(1, 100).contains(Interval::new(50, 150)));
+        assert!(!Interval::new(1, 100).contains(Interval::new(150, 160)));
+    }
+
+    #[test]
+    fn test_split_exponential() {
+        struct Test {
+            interval: Interval,
+            sizes: Vec<u64>,
+            expected: Vec<Interval>,
+        }
+
+        let tests = [
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![100u64],
+                expected: vec![Interval::new(1, 100)],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![50u64, 50],
+                expected: vec![Interval::new(1, 50), Interval::new(51, 100)],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![10u64, 20, 30, 40],
+                expected: vec![
+                    Interval::new(1, 10),
+                    Interval::new(11, 30),
+                    Interval::new(31, 60),
+                    Interval::new(61, 100),
+                ],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![25u64, 25],
+                expected: vec![Interval::new(1, 50), Interval::new(51, 100)],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![1u64, 1],
+                expected: vec![Interval::new(1, 50), Interval::new(51, 100)],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![33u64, 33, 33],
+                expected: vec![
+                    Interval::new(1, 33),
+                    Interval::new(34, 66),
+                    Interval::new(67, 100),
+                ],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![10u64, 15, 25],
+                expected: vec![
+                    Interval::new(1, 10),
+                    Interval::new(11, 25),
+                    Interval::new(26, 100),
+                ],
+            },
+            Test {
+                interval: Interval::new(1, 100),
+                sizes: vec![25u64, 15, 10],
+                expected: vec![
+                    Interval::new(1, 75),
+                    Interval::new(76, 90),
+                    Interval::new(91, 100),
+                ],
+            },
+            Test {
+                interval: Interval::new(1, 10_000),
+                sizes: vec![10u64, 10, 20],
+                expected: vec![
+                    Interval::new(1, 20),
+                    Interval::new(21, 40),
+                    Interval::new(41, 10_000),
+                ],
+            },
+            Test {
+                interval: Interval::new(1, 100_000),
+                sizes: vec![31_000u64, 31_000, 30_001],
+                expected: vec![
+                    Interval::new(1, 35_000),
+                    Interval::new(35_001, 69_999),
+                    Interval::new(70_000, 100_000),
+                ],
+            },
+        ];
+
+        for test in &tests {
+            assert_eq!(
+                test.expected,
+                test.interval.split_exponential(test.sizes.as_slice())
+            );
+        }
+    }
+}
diff --git a/consensus/src/dag/types/mod.rs b/consensus/src/dag/types/mod.rs
new file mode 100644
index 0000000000..d3acae1c23
--- /dev/null
+++ b/consensus/src/dag/types/mod.rs
@@ -0,0 +1,6 @@
+pub mod ghostdata;
+pub mod interval;
+pub mod ordering;
+pub mod perf;
+pub mod reachability;
+pub mod trusted;
diff --git a/consensus/src/dag/types/ordering.rs b/consensus/src/dag/types/ordering.rs
new file mode 100644
index 0000000000..a1ed8c2561
--- /dev/null
+++ b/consensus/src/dag/types/ordering.rs
@@ -0,0 +1,36 @@
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::BlueWorkType;
+use std::cmp::Ordering;
+
+#[derive(Eq, Clone, Debug, Serialize, Deserialize)]
+pub struct SortableBlock {
+    pub hash: Hash,
+    pub blue_work: BlueWorkType,
+}
+
+impl SortableBlock {
+    pub fn new(hash: Hash, blue_work: BlueWorkType) -> Self {
+        Self { hash, blue_work }
+    }
+}
+
+impl PartialEq for SortableBlock {
+    fn eq(&self, other: &Self) -> bool {
+        self.hash == other.hash
+    }
+}
+
+impl PartialOrd for SortableBlock {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for SortableBlock {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.blue_work
+            .cmp(&other.blue_work)
+            .then_with(|| self.hash.cmp(&other.hash))
+    }
+}
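A small illustration (not part of the diff) of the total order defined above: `blue_work` dominates, with the hash as a deterministic tie-breaker, so sorting yields a consensus-stable order. This assumes `BlueWorkType: From<u64>`, which holds for the unsigned integer type used here.

```rust
fn example() {
    let mut blocks = vec![
        SortableBlock::new(Hash::from_u64(1), BlueWorkType::from(10u64)),
        SortableBlock::new(Hash::from_u64(2), BlueWorkType::from(30u64)),
        SortableBlock::new(Hash::from_u64(3), BlueWorkType::from(20u64)),
    ];
    blocks.sort(); // ascending by blue_work
    // The block with the highest blue_work sorts last.
    assert_eq!(blocks.last().unwrap().hash, Hash::from_u64(2));
}
```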
diff --git a/consensus/src/dag/types/perf.rs b/consensus/src/dag/types/perf.rs
new file mode 100644
index 0000000000..6da44d4cd7
--- /dev/null
+++ b/consensus/src/dag/types/perf.rs
@@ -0,0 +1,51 @@
+//!
+//! A module for performance-critical constants which depend on consensus parameters.
+//! The constants in this module should all be revisited if mainnet consensus parameters change.
+//!
+
+/// The default target depth for reachability reindexes.
+pub const DEFAULT_REINDEX_DEPTH: u64 = 100;
+
+/// The default slack interval used by the reachability
+/// algorithm to account for blocks out of the selected chain.
+pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12;
+
+#[derive(Clone, Debug)]
+pub struct PerfParams {
+    //
+    // Cache sizes
+    //
+    /// Preferred cache size for header-related data
+    pub header_data_cache_size: u64,
+
+    /// Preferred cache size for block-body-related data, which
+    /// is typically orders of magnitude larger than header data
+    /// (note this cannot be set too high due to severe memory consumption)
+    pub block_data_cache_size: u64,
+
+    /// Preferred cache size for UTXO-related data
+    pub utxo_set_cache_size: u64,
+
+    /// Preferred cache size for block-window-related data
+    pub block_window_cache_size: u64,
+
+    //
+    // Thread-pools
+    //
+    /// Defaults to 0, which indicates using the system default,
+    /// typically the number of logical CPU cores
+    pub block_processors_num_threads: usize,
+
+    /// Defaults to 0, which indicates using the system default,
+    /// typically the number of logical CPU cores
+    pub virtual_processor_num_threads: usize,
+}
+
+pub const PERF_PARAMS: PerfParams = PerfParams {
+    header_data_cache_size: 10_000,
+    block_data_cache_size: 200,
+    utxo_set_cache_size: 10_000,
+    block_window_cache_size: 2000,
+    block_processors_num_threads: 0,
+    virtual_processor_num_threads: 0,
+};
diff --git a/consensus/src/dag/types/reachability.rs b/consensus/src/dag/types/reachability.rs
new file mode 100644
index 0000000000..35dc3979b6
--- /dev/null
+++ b/consensus/src/dag/types/reachability.rs
@@ -0,0 +1,26 @@
+use super::interval::Interval;
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::BlockHashes;
+use std::sync::Arc;
+
+#[derive(Clone, Default, Debug, Serialize, Deserialize)]
+pub struct ReachabilityData {
+    pub children: BlockHashes,
+    pub parent: Hash,
+    pub interval: Interval,
+    pub height: u64,
+    pub future_covering_set: BlockHashes,
+}
+
+impl ReachabilityData {
+    pub fn new(parent: Hash, interval: Interval, height: u64) -> Self {
+        Self {
+            children: Arc::new(vec![]),
+            parent,
+            interval,
+            height,
+            future_covering_set: Arc::new(vec![]),
+        }
+    }
+}
diff --git a/consensus/src/dag/types/trusted.rs b/consensus/src/dag/types/trusted.rs
new file mode 100644
index 0000000000..9a4cf37bbd
--- /dev/null
+++ b/consensus/src/dag/types/trusted.rs
@@ -0,0 +1,26 @@
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use starcoin_types::blockhash::{BlockHashMap, BlueWorkType, KType};
+
+/// Represents semi-trusted, externally provided Ghostdag data (by a network peer)
+#[derive(Clone, Serialize, Deserialize)]
+pub struct ExternalGhostdagData {
+    pub blue_score: u64,
+    pub blue_work: BlueWorkType,
+    pub selected_parent: Hash,
+    pub mergeset_blues: Vec<Hash>,
+    pub mergeset_reds: Vec<Hash>,
+    pub blues_anticone_sizes: BlockHashMap<KType>,
+}
+
+/// Represents externally provided Ghostdag data associated with a block Hash
+pub struct TrustedGhostdagData {
+    pub hash: Hash,
+    pub ghostdag: ExternalGhostdagData,
+}
+
+impl TrustedGhostdagData {
+    pub fn new(hash: Hash, ghostdag: ExternalGhostdagData) -> Self {
+        Self { hash, ghostdag }
+    }
+}
diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs
index 8b870c6d2e..6a5bb88b95 100644
--- a/consensus/src/lib.rs
+++ b/consensus/src/lib.rs
@@ -23,11 +23,19 @@ pub mod cn;
 mod consensus;
 #[cfg(test)]
 mod consensus_test;
+mod consensusdb;
+pub mod dag;
 pub mod difficulty;
 pub mod dummy;
 pub mod keccak;
 
 pub use consensus::{Consensus, ConsensusVerifyError};
+pub use consensusdb::consensus_relations::{
+    DbRelationsStore, RelationsStore, RelationsStoreReader,
+};
+pub
use consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; +pub use consensusdb::schema; +pub use dag::blockdag::BlockDAG; pub use starcoin_time_service::duration_since_epoch; pub fn target_to_difficulty(target: U256) -> U256 { diff --git a/executor/benchmark/src/lib.rs b/executor/benchmark/src/lib.rs index 3706971393..87aaa90f63 100644 --- a/executor/benchmark/src/lib.rs +++ b/executor/benchmark/src/lib.rs @@ -256,7 +256,7 @@ pub fn run_benchmark( let chain_state = ChainStateDB::new(storage, None); let net = ChainNetwork::new_test(); let genesis_txn = Genesis::build_genesis_transaction(&net).unwrap(); - let _ = Genesis::execute_genesis_txn(&chain_state, genesis_txn).unwrap(); + let _txn_info = Genesis::execute_genesis_txn(&chain_state, genesis_txn).unwrap(); let (block_sender, block_receiver) = mpsc::sync_channel(50 /* bound */); diff --git a/flexidag/Cargo.toml b/flexidag/Cargo.toml new file mode 100644 index 0000000000..79d1439fa4 --- /dev/null +++ b/flexidag/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "starcoin-flexidag" +authors = { workspace = true } +edition = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +version = "1.13.7" +homepage = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { workspace = true } +async-trait = { workspace = true } +futures = { workspace = true } +starcoin-config = { workspace = true } +starcoin-crypto = { workspace = true } +starcoin-logger = { workspace = true } +starcoin-service-registry = { workspace = true } +starcoin-storage = { workspace = true } +starcoin-types = { workspace = true } +tokio = { workspace = true } +starcoin-consensus = { workspace = true } +starcoin-accumulator = { workspace = true } +thiserror = { workspace = true } + diff --git a/flexidag/src/flexidag_service.rs b/flexidag/src/flexidag_service.rs new file mode 100644 index 0000000000..a83edf2de1 --- /dev/null +++ b/flexidag/src/flexidag_service.rs @@ -0,0 +1,572 @@ +use std::{ + collections::{BTreeSet, BinaryHeap}, + sync::Arc, +}; + +use anyhow::{anyhow, bail, Error, Ok, Result}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator, node::AccumulatorStoreType}; +use starcoin_config::{NodeConfig, TimeService}; +use starcoin_consensus::{dag::types::ghostdata::GhostdagData, BlockDAG}; +use starcoin_crypto::HashValue; +use starcoin_service_registry::{ + ActorService, ServiceContext, ServiceFactory, ServiceHandler, ServiceRequest, +}; +use starcoin_storage::{ + flexi_dag::{KTotalDifficulty, SyncFlexiDagSnapshot, SyncFlexiDagSnapshotHasher}, + storage::CodecKVStore, + BlockStore, Storage, SyncFlexiDagStore, block_info::BlockInfoStore, Store, +}; +use starcoin_types::{block::BlockHeader, header::DagHeader, startup_info}; + +#[derive(Debug, Clone)] +pub struct DumpTipsToAccumulator { + pub block_header: BlockHeader, + pub current_head_block_id: HashValue, + pub k_total_difficulty: KTotalDifficulty, +} + +impl ServiceRequest for DumpTipsToAccumulator { + type Response = anyhow::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct UpdateDagTips { + pub block_header: BlockHeader, + pub current_head_block_id: HashValue, + pub k_total_difficulty: KTotalDifficulty, +} + +impl ServiceRequest for UpdateDagTips { + type Response = anyhow::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct GetDagTips; + +impl ServiceRequest for 
GetDagTips {
+    type Response = anyhow::Result<Option<Vec<HashValue>>>;
+}
+
+#[derive(Debug, Clone)]
+pub struct GetDagAccumulatorInfo;
+
+impl ServiceRequest for GetDagAccumulatorInfo {
+    type Response = anyhow::Result<Option<AccumulatorInfo>>;
+}
+
+#[derive(Debug, Clone)]
+pub struct GetDagAccumulatorLeafDetail {
+    pub leaf_index: u64,
+    pub batch_size: u64,
+}
+
+#[derive(Debug, Clone)]
+pub struct DagAccumulatorLeafDetail {
+    pub accumulator_root: HashValue,
+    pub tips: Vec<HashValue>,
+}
+
+impl ServiceRequest for GetDagAccumulatorLeafDetail {
+    type Response = anyhow::Result<Vec<DagAccumulatorLeafDetail>>;
+}
+
+#[derive(Debug, Clone)]
+pub struct GetDagBlockParents {
+    pub block_id: HashValue,
+}
+
+#[derive(Debug, Clone)]
+pub struct DagBlockParents {
+    pub parents: Vec<HashValue>,
+}
+
+impl ServiceRequest for GetDagBlockParents {
+    type Response = anyhow::Result<DagBlockParents>;
+}
+
+#[derive(Debug, Clone)]
+pub struct GetDagAccumulatorLeaves {
+    pub leaf_index: u64,
+    pub batch_size: u64,
+    pub reverse: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct DagAccumulatorLeaf {
+    pub leaf_index: u64,
+    pub dag_accumulator_root: HashValue,
+}
+
+impl ServiceRequest for GetDagAccumulatorLeaves {
+    type Response = anyhow::Result<Vec<DagAccumulatorLeaf>>;
+}
+
+#[derive(Debug, Clone)]
+pub struct AddToDag {
+    pub block_header: BlockHeader,
+}
+
+#[derive(Debug, Clone)]
+pub struct MergesetBlues {
+    pub selected_parent: HashValue,
+    pub mergeset_blues: Vec<HashValue>,
+}
+
+impl ServiceRequest for AddToDag {
+    type Response = anyhow::Result<MergesetBlues>;
+}
+
+#[derive(Debug, Clone)]
+pub struct ForkDagAccumulator {
+    pub new_blocks: Vec<HashValue>,
+    pub dag_accumulator_index: u64,
+    pub block_header_id: HashValue,
+}
+
+impl ServiceRequest for ForkDagAccumulator {
+    type Response = anyhow::Result<AccumulatorInfo>;
+}
+
+#[derive(Debug, Clone)]
+pub struct FinishSync {
+    pub dag_accumulator_info: AccumulatorInfo,
+}
+
+impl ServiceRequest for FinishSync {
+    type Response = anyhow::Result<()>;
+}
+
+pub struct TipInfo {
+    // Some(_) once the chain has forked to a dag; None while the chain is still in the legacy single-chain mode
+    tips: Option<Vec<HashValue>>,
+    k_total_difficulties: BTreeSet<KTotalDifficulty>,
+}
+
+pub struct FlexidagService {
+    dag: Option<BlockDAG>,
+    dag_accumulator: Option<MerkleAccumulator>,
+    tip_info: Option<TipInfo>,
+    storage: Arc<Storage>,
+}
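+
+// Editor's note: an illustrative sketch added for clarity; it is not part
+// of the original patch. FlexidagService is driven entirely by the
+// ServiceRequest messages declared above: callers hold a
+// ServiceRef<FlexidagService> and await typed responses. The outer `?`
+// unwraps the mailbox error and the inner one the handler's own Result.
+#[allow(dead_code)]
+async fn demo_query_tips(
+    flexidag: starcoin_service_registry::ServiceRef<FlexidagService>,
+) -> Result<Option<Vec<HashValue>>> {
+    // None means the chain is still running in single-chain mode.
+    flexidag.send(GetDagTips).await?
+}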
bail!("cannot merge dag accumulator since its number is not the same as other"); + } + let tip_info = self.tip_info.as_mut().ok_or_else("the tips should not be none")?; + msg.new_blocks.iter().for_each(|block_id| { + if !tip_info.tips.contains(block_id) { + tip_info.tips.push(block_id.clone()); + } + }); + + let (key, snaphot_hasher) = Self::create_snapshot_by_tips(tip_info.tips, msg.block_header_id)?; + dag_accumulator.append(&vec![key])?; + let dag_accumulator_info = dag_accumulator.get_info(); + self.storage.get_accumulator_snapshot_storage().put(key, snaphot_hasher.to_snapshot(dag_accumulator_info))?; + dag_accumulator.flush()?; + Ok(dag_accumulator_info) + } + + fn merge_from_small_dag(&mut self, msg: ForkDagAccumulator) -> Result { + let dag_accumulator = self + .dag_accumulator + .as_mut() + .ok_or_else(error || anyhow!("dag accumulator is none"))?; + // fetch the block in the dag according to the dag accumulator index + let previous_key = dag_accumulator.get_leaf(msg.dag_accumulator_index - 1)? + .ok_or_else(error || anyhow!("the dag snapshot hash is none"))?; + + let current_key = dag_accumulator.get_leaf(msg.dag_accumulator_index)? + .ok_or_else(error || anyhow!("the dag snapshot hash is none"))?; + + let pre_snapshot = self + .storage + .get_accumulator_snapshot_storage() + .get(previous_key)? + .ok_or_else(error || anyhow!("the dag snapshot is none"))?; + + let current_snapshot = self + .storage + .get_accumulator_snapshot_storage() + .get(current_key)? + .ok_or_else(error || anyhow!("the dag snapshot is none"))?; + + // fork the dag accumulator according to the ForkDagAccumulator.dag_accumulator_index + let fork = dag_accumulator.fork(Some(pre_snapshot.accumulator_info)); + + let mut new_blocks = msg.new_blocks; + current_snapshot.child_hashes.iter().for_each(|block_id| { + if !new_blocks.contains(block_id) { + new_blocks.push(block_id.clone()); + } + }); + + let (key, snaphot_hasher) = Self::create_snapshot_by_tips(new_blocks, msg.block_header_id)?; + fork.append(&vec![key])?; + let dag_accumulator_info = fork.get_info(); + self.storage.get_accumulator_snapshot_storage().put(key, snaphot_hasher.to_snapshot(dag_accumulator_info))?; + fork.flush()?; + Ok(dag_accumulator_info) + } + +} + +impl ServiceFactory for FlexidagService { + fn create(ctx: &mut ServiceContext) -> Result { + let storage = ctx.get_shared::>()?; + let config = ctx.get_shared::>()?; + let (dag, dag_accumulator) = + BlockDAG::try_init_with_storage(storage.clone(), config.clone())?; + let tip_info = dag_accumulator.as_ref().map(|accumulator| { + let tips_index = accumulator.num_leaves(); + let tips_key = accumulator + .get_leaf(tips_index) + .expect("failed to read the dag snapshot hash") + .expect("the dag snapshot hash is none"); + let snapshot = storage + .get_accumulator_snapshot_storage() + .get(tips_key) + .expect("failed to read the snapsho object") + .expect("dag snapshot object is none"); + TipInfo { + tips: Some(snapshot.child_hashes), + k_total_difficulties: snapshot.k_total_difficulties, + } + }); + Ok(Self { + dag, + dag_accumulator, + tip_info, + storage: storage.clone(), + }) + } +} + +impl ActorService for FlexidagService { + fn started(&mut self, ctx: &mut ServiceContext) -> Result<()> { + // ctx.subscribe::(); + Ok(()) + } + + fn stopped(&mut self, ctx: &mut ServiceContext) -> Result<()> { + // ctx.unsubscribe::(); + Ok(()) + } +} + +// send this message after minting a new block +// and the block was committed +// and startup info was updated +impl ServiceHandler for FlexidagService { + fn 
+impl ServiceHandler<Self, DumpTipsToAccumulator> for FlexidagService {
+    fn handle(
+        &mut self,
+        msg: DumpTipsToAccumulator,
+        ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<()> {
+        let storage = ctx.get_shared::<Arc<Storage>>()?;
+        if self.tip_info.is_none() {
+            let config = ctx.get_shared::<Arc<NodeConfig>>()?;
+            let (dag, dag_accumulator) =
+                BlockDAG::try_init_with_storage(storage.clone(), config)?;
+            if dag.is_none() {
+                Ok(()) // the chain is still in single-chain mode
+            } else {
+                // initialize the dag data; the chain will become a dag chain at the next block
+                self.dag = dag;
+                self.dag_accumulator = dag_accumulator;
+                self.tip_info = Some(TipInfo {
+                    tips: Some(vec![msg.block_header.id()]),
+                    k_total_difficulties: [msg.k_total_difficulty].into_iter().collect(),
+                });
+                self.storage = storage.clone();
+                Ok(())
+            }
+        } else {
+            // the chain has already become a flexidag chain
+            let tip_info = self
+                .tip_info
+                .take()
+                .expect("the tips should not be none in this branch");
+            let tips = tip_info.tips.expect("the tips should not be none");
+            let (key, snapshot_hasher) =
+                Self::create_snapshot_by_tips(&storage, tips, msg.current_head_block_id)?;
+            let dag = self
+                .dag_accumulator
+                .as_mut()
+                .expect("the tips are not none but the dag accumulator is none");
+            dag.append(&[key])?;
+            storage
+                .get_accumulator_snapshot_storage()
+                .put(key, snapshot_hasher.to_snapshot(dag.get_info()))?;
+            dag.flush()?;
+            self.tip_info = Some(TipInfo {
+                tips: Some(vec![msg.block_header.id()]),
+                k_total_difficulties: [msg.k_total_difficulty].into_iter().collect(),
+            });
+            self.storage = storage.clone();
+            Ok(())
+        }
+    }
+}
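+
+// Editor's note: an illustrative note added for clarity; it is not part of
+// the original patch. The intended flow is: every committed block reports
+// its header through UpdateDagTips, which only extends the in-memory tip
+// set, while DumpTipsToAccumulator (sent once a new head is minted and the
+// startup info is updated) snapshots the accumulated tips into the dag
+// accumulator and starts a fresh tip set. A hypothetical dispatch:
+//
+//     flexidag.send(UpdateDagTips {
+//         block_header,
+//         current_head_block_id,
+//         k_total_difficulty,
+//     }).await??;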
+
+impl ServiceHandler<Self, UpdateDagTips> for FlexidagService {
+    fn handle(
+        &mut self,
+        msg: UpdateDagTips,
+        ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<()> {
+        let header = msg.block_header;
+        match &mut self.tip_info {
+            Some(tip_info) => {
+                let tips = tip_info
+                    .tips
+                    .as_mut()
+                    .ok_or_else(|| anyhow!("the tips should not be none"))?;
+                if !tips.contains(&header.id()) {
+                    tips.push(header.id());
+                    tip_info.k_total_difficulties.insert(KTotalDifficulty {
+                        head_block_id: msg.k_total_difficulty.head_block_id,
+                        total_difficulty: msg.k_total_difficulty.total_difficulty,
+                    });
+                }
+                Ok(())
+            }
+            None => {
+                let storage = ctx.get_shared::<Arc<Storage>>()?;
+                let config = ctx.get_shared::<Arc<NodeConfig>>()?;
+                if header.number() == storage.dag_fork_height(config.net().id().clone()) {
+                    let (dag, dag_accumulator) =
+                        BlockDAG::try_init_with_storage(storage.clone(), config)?;
+                    if dag.is_none() {
+                        Ok(()) // the chain is still in single-chain mode
+                    } else {
+                        // initialize the dag data; the chain will become a dag chain at the next block
+                        self.dag = dag;
+                        self.tip_info = Some(TipInfo {
+                            tips: Some(vec![header.id()]),
+                            k_total_difficulties: [msg.k_total_difficulty]
+                                .into_iter()
+                                .collect(),
+                        });
+                        self.dag_accumulator = dag_accumulator;
+
+                        storage
+                            .get_startup_info()?
+                            .map(|mut startup_info| {
+                                startup_info.dag_main = Some(header.id());
+                                storage.save_startup_info(startup_info)
+                            })
+                            .expect("startup info should not be none")
+                    }
+                } else {
+                    Ok(()) // drop the block; the chain is still in single-chain mode
+                }
+            }
+        }
+    }
+}
+
+impl ServiceHandler<Self, GetDagTips> for FlexidagService {
+    fn handle(
+        &mut self,
+        _msg: GetDagTips,
+        _ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<Option<Vec<HashValue>>> {
+        Ok(self
+            .tip_info
+            .as_ref()
+            .and_then(|tip_info| tip_info.tips.clone()))
+    }
+}
+
+impl ServiceHandler<Self, GetDagAccumulatorInfo> for FlexidagService {
+    fn handle(
+        &mut self,
+        _msg: GetDagAccumulatorInfo,
+        _ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<Option<AccumulatorInfo>> {
+        Ok(self
+            .dag_accumulator
+            .as_ref()
+            .map(|dag_accumulator| dag_accumulator.get_info()))
+    }
+}
+
+impl ServiceHandler<Self, GetDagAccumulatorLeaves> for FlexidagService {
+    fn handle(
+        &mut self,
+        msg: GetDagAccumulatorLeaves,
+        _ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<Vec<DagAccumulatorLeaf>> {
+        match &self.dag_accumulator {
+            Some(dag_accumulator) => {
+                let end_index = std::cmp::min(
+                    msg.leaf_index + msg.batch_size - 1,
+                    dag_accumulator.num_leaves() - 1,
+                );
+                let mut result = vec![];
+                for index in msg.leaf_index..=end_index {
+                    let real_index = if msg.reverse {
+                        end_index - index + 1
+                    } else {
+                        index
+                    };
+                    let key = dag_accumulator
+                        .get_leaf(real_index)?
+                        .ok_or_else(|| anyhow!("the dag snapshot hash is none"))?;
+                    let snapshot = self
+                        .storage
+                        .get_accumulator_snapshot_storage()
+                        .get(key)?
+                        .expect("the snapshot should not be none");
+                    result.push(DagAccumulatorLeaf {
+                        leaf_index: real_index,
+                        dag_accumulator_root: snapshot.accumulator_info.accumulator_root,
+                    });
+                }
+                Ok(result)
+            }
+            None => bail!("dag accumulator is none"),
+        }
+    }
+}
+
+impl ServiceHandler<Self, GetDagBlockParents> for FlexidagService {
+    fn handle(
+        &mut self,
+        msg: GetDagBlockParents,
+        _ctx: &mut ServiceContext<FlexidagService>,
+    ) -> Result<DagBlockParents> {
+        match &self.dag {
+            Some(dag) => Ok(DagBlockParents {
+                parents: dag.get_parents(msg.block_id)?,
+            }),
+            None => bail!("dag is none"),
+        }
+    }
+}
+ .ok_or_else(|| anyhow!("the dag snapshot is none"))?; + details.push(DagAccumulatorLeafDetail { + accumulator_root: snapshot.accumulator_info.accumulator_root, + tips: snapshot.child_hashes, + }); + } + Ok(details) + } + None => bail!("dag accumulator is none"), + } + } +} + +impl ServiceHandler for FlexidagService { + fn handle( + &mut self, + msg: AddToDag, + _ctx: &mut ServiceContext, + ) -> Result { + let ghost_dag_data = self.add_to_dag(msg.block_header)?; + Ok(MergesetBlues { + selected_parent: ghost_dag_data.selected_parent, + mergeset_blues: ghost_dag_data.mergeset_blues.as_ref().clone(), + }) + } +} + +impl ServiceHandler for FlexidagService { + fn handle( + &mut self, + msg: ForkDagAccumulator, + _ctx: &mut ServiceContext, + ) -> Result { + let dag_accumulator = self + .dag_accumulator + .as_ref() + .ok_or_else(error || anyhow!("dag accumulator is none"))?; + + if msg.dag_accumulator_index > dag_accumulator.num_leaves() { + self.merge_from_big_dag(msg) + } else { + self.merge_from_small_dag(msg) + } + } +} + +impl ServiceHandler for FlexidagService { + fn handle( + &mut self, + msg: FinishSync, + _ctx: &mut ServiceContext, + ) -> Result<()> { + let dag_accumulator = self.dag_accumulator.ok_or_else(|| anyhow!("the dag_accumulator is none when sync finish"))?; + let local_info = dag_accumulator.get_info(); + if msg.dag_accumulator_info.get_num_leaves() < local_info.get_num_leaves() { + let mut new_dag_accumulator = MerkleAccumulator::new_with_info(msg.dag_accumulator_info, self.storage.get_accumulator_store(AccumulatorStoreType::SyncDag)); + for index in msg.dag_accumulator_info.get_num_leaves()..local_info.get_num_leaves() { + let key = dag_accumulator.get_leaf(index)?.ok_or_else(|| anyhow!("the dag_accumulator leaf is none when sync finish"))?; + new_dag_accumulator.append(&[key])?; + } + self.dag_accumulator = Some(new_dag_accumulator); + Ok(()) + } else { + self.dag_accumulator = Some(MerkleAccumulator::new_with_info(msg.dag_accumulator_info, self.storage.get_accumulator_store(AccumulatorStoreType::SyncDag))); + Ok(()) + } + } +} \ No newline at end of file diff --git a/flexidag/src/lib.rs b/flexidag/src/lib.rs new file mode 100644 index 0000000000..66689bb05f --- /dev/null +++ b/flexidag/src/lib.rs @@ -0,0 +1,17 @@ +pub mod flexidag_service; +pub use flexidag_service::FlexidagService; + +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/genesis/generated/halley/genesis b/genesis/generated/halley/genesis index 0b31f956ca..f78101314e 100644 Binary files a/genesis/generated/halley/genesis and b/genesis/generated/halley/genesis differ diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index f16dc6b0ed..4683b286d8 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -261,11 +261,12 @@ impl Genesis { storage.clone(), net.genesis_epoch(), self.block.clone(), + net.id().clone(), )?; let startup_info = StartupInfo::new(genesis_chain.current_header().id()); storage.save_startup_info(startup_info)?; storage - .get_chain_info()? + .get_chain_info(net.id().clone())? 
.ok_or_else(|| format_err!("ChainInfo should exist after genesis block executed.")) } @@ -318,7 +319,7 @@ impl Genesis { data_dir: &Path, ) -> Result<(ChainInfo, Genesis)> { debug!("load startup_info."); - let (chain_info, genesis) = match storage.get_chain_info() { + let (chain_info, genesis) = match storage.get_chain_info(net.id().clone()) { Ok(Some(chain_info)) => { debug!("Get chain info {:?} from db", chain_info); info!("Check genesis file."); diff --git a/miner/Cargo.toml b/miner/Cargo.toml index d5180be4e1..6a023bf48c 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -27,6 +27,8 @@ starcoin-txpool-api = { workspace = true } starcoin-vm-types = { workspace = true } tokio = { features = ["full"], workspace = true } starcoin-types = { package = "starcoin-types", workspace = true } +starcoin-flexidag = { workspace = true } +async-std = { workspace = true } [dev-dependencies] starcoin-network-rpc = { package = "starcoin-network-rpc", workspace = true } diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 5e6ba1ae50..17764bcb83 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::create_block_template::metrics::BlockBuilderMetrics; -use anyhow::{format_err, Result}; +use anyhow::{bail, format_err, Result}; use futures::executor::block_on; use starcoin_account_api::{AccountAsyncService, AccountInfo, DefaultAccountChangeEvent}; use starcoin_account_service::AccountService; @@ -13,10 +13,13 @@ use starcoin_config::NodeConfig; use starcoin_consensus::Consensus; use starcoin_crypto::hash::HashValue; use starcoin_executor::VMMetrics; +use starcoin_flexidag::flexidag_service::GetDagTips; +use starcoin_flexidag::{flexidag_service, FlexidagService}; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; use starcoin_service_registry::{ - ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, ServiceRequest, + ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, ServiceRef, + ServiceRequest, }; use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_txpool::TxPoolService; @@ -79,6 +82,7 @@ impl ServiceFactory for BlockBuilderService { .and_then(|registry| BlockBuilderMetrics::register(registry).ok()); let vm_metrics = ctx.get_shared_opt::()?; + let flexidag_service = ctx.service_ref::()?.clone(); let inner = Inner::new( config.net(), storage, @@ -87,6 +91,7 @@ impl ServiceFactory for BlockBuilderService { config.miner.block_gas_limit, miner_account, metrics, + flexidag_service, vm_metrics, )?; Ok(Self { inner }) @@ -190,6 +195,7 @@ pub struct Inner

{ local_block_gas_limit: Option, miner_account: AccountInfo, metrics: Option, + flexidag_service: ServiceRef, vm_metrics: Option, } @@ -205,12 +211,14 @@ where local_block_gas_limit: Option, miner_account: AccountInfo, metrics: Option, + flexidag_service: ServiceRef, vm_metrics: Option, ) -> Result { let chain = BlockChain::new( net.time_service(), block_id, storage.clone(), + net.id().clone(), vm_metrics.clone(), )?; @@ -223,6 +231,7 @@ where local_block_gas_limit, miner_account, metrics, + flexidag_service, vm_metrics, }) } @@ -250,6 +259,7 @@ where self.chain.time_service(), block.header().id(), self.storage.clone(), + self.chain.net_id(), self.vm_metrics.clone(), )?; //current block possible be uncle. @@ -312,6 +322,9 @@ where let author = *self.miner_account.address(); let previous_header = self.chain.current_header(); + + let dag_block_parents = self.get_dag_block_parents(); + let uncles = self.find_uncles(); let mut now_millis = self.chain.time_service().now_millis(); if now_millis <= previous_header.timestamp() { @@ -345,6 +358,7 @@ where difficulty, strategy, self.vm_metrics.clone(), + tips_header, )?; let excluded_txns = opened_block.push_txns(txns)?; let template = opened_block.finalize()?; @@ -357,4 +371,10 @@ where template, }) } + + fn get_dag_block_parents(&self) -> Result>> { + Ok(async_std::task::block_on( + self.flexidag_service.send(GetDagTips), + )??) + } } diff --git a/miner/src/create_block_template/test_create_block_template.rs b/miner/src/create_block_template/test_create_block_template.rs index ebcb912977..eeb610cbde 100644 --- a/miner/src/create_block_template/test_create_block_template.rs +++ b/miner/src/create_block_template/test_create_block_template.rs @@ -79,7 +79,14 @@ fn test_switch_main() { let net = node_config.net(); for i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + net.id().clone(), + None, + ) + .unwrap(); let mut tmp_inner = Inner::new( net, @@ -116,8 +123,14 @@ fn test_switch_main() { } for i in 0..3 { - let mut new_main = - BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut new_main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + net.id().clone(), + None, + ) + .unwrap(); let block_template = if i == 0 { let tmp = Inner::new( @@ -196,7 +209,14 @@ fn test_do_uncles() { let net = node_config.net(); for _i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + net.id().clone(), + None, + ) + .unwrap(); let mut tmp_inner = Inner::new( net, @@ -224,8 +244,14 @@ fn test_do_uncles() { // branch for _i in 0..times { - let mut branch = - BlockChain::new(net.time_service(), genesis_id, storage.clone(), None).unwrap(); + let mut branch = BlockChain::new( + net.time_service(), + genesis_id, + storage.clone(), + net.id().clone(), + None, + ) + .unwrap(); let inner = Inner::new( net, storage.clone(), @@ -254,7 +280,14 @@ fn test_do_uncles() { // uncles for i in 0..times { - let mut main = BlockChain::new(net.time_service(), head_id, storage.clone(), None).unwrap(); + let mut main = BlockChain::new( + net.time_service(), + head_id, + storage.clone(), + net.id().clone(), + None, + ) + .unwrap(); let block_template = main_inner .as_ref() @@ -367,8 +400,14 @@ fn test_new_branch() { let mut new_head_id = genesis_id; 
     let net = node_config.net();
     for i in 0..(times * 2) {
-        let mut branch =
-            BlockChain::new(net.time_service(), new_head_id, storage.clone(), None).unwrap();
+        let mut branch = BlockChain::new(
+            net.time_service(),
+            new_head_id,
+            storage.clone(),
+            net.id().clone(),
+            None,
+        )
+        .unwrap();
         let inner = Inner::new(
             net,
             storage.clone(),
diff --git a/miner/src/lib.rs b/miner/src/lib.rs
index 54dfd52c12..229d371aba 100644
--- a/miner/src/lib.rs
+++ b/miner/src/lib.rs
@@ -251,10 +251,11 @@ impl MinerService {
         }
 
         if let Some(task) = self.current_task.take() {
+            let tips_header = task.block_template.parents_hash.clone();
             let block = task.finish(nonce, extra);
-            let block_hash = block.id();
+            let block_hash: HashValue = block.id();
             info!(target: "miner", "Mint new block: {}", block);
-            ctx.broadcast(MinedBlock(Arc::new(block)));
+            ctx.broadcast(MinedBlock(Arc::new(block), tips_header));
             if let Some(metrics) = self.metrics.as_ref() {
                 metrics.block_mint_count.inc();
             }
diff --git a/network-rpc/api/src/dag_protocol.rs b/network-rpc/api/src/dag_protocol.rs
new file mode 100644
index 0000000000..17b2936f7d
--- /dev/null
+++ b/network-rpc/api/src/dag_protocol.rs
@@ -0,0 +1,47 @@
+use network_p2p_core::PeerId;
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue;
+use starcoin_types::block::Block;
+
+#[derive(Clone, Debug, Hash, Eq, PartialOrd, Ord, PartialEq, Serialize, Deserialize)]
+pub struct RelationshipPair {
+    pub parent: HashValue,
+    pub child: HashValue,
+}
+
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+pub struct GetDagAccumulatorLeaves {
+    pub accumulator_leaf_index: u64,
+    pub batch_size: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+pub struct TargetDagAccumulatorLeaf {
+    pub accumulator_root: HashValue, // accumulator info root
+    pub leaf_index: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+pub struct GetTargetDagAccumulatorLeafDetail {
+    pub leaf_index: u64,
+    pub batch_size: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
+pub struct TargetDagAccumulatorLeafDetail {
+    pub accumulator_root: HashValue,
+    pub tips: Vec<HashValue>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct GetSyncDagBlockInfo {
+    pub leaf_index: u64,
+    pub batch_size: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct SyncDagBlockInfo {
+    pub block_id: HashValue,
+    pub block: Option<Block>,
+    pub peer_id: Option<PeerId>,
+}
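+
+// Editor's note: an illustrative sketch added for clarity; it is not part
+// of the original patch. Accumulator sync is paged: a syncing node first
+// asks a peer for a window of leaf summaries and then for the detail
+// (root plus tips) of the same window. The batch size below is an
+// arbitrary example value.
+#[allow(dead_code)]
+fn demo_paged_requests(
+    start: u64,
+) -> (GetDagAccumulatorLeaves, GetTargetDagAccumulatorLeafDetail) {
+    let batch_size = 64;
+    (
+        GetDagAccumulatorLeaves {
+            accumulator_leaf_index: start,
+            batch_size,
+        },
+        GetTargetDagAccumulatorLeafDetail {
+            leaf_index: start,
+            batch_size,
+        },
+    )
+}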
diff --git a/network-rpc/api/src/lib.rs b/network-rpc/api/src/lib.rs
index dd4b3a909c..5ea816fcc7 100644
--- a/network-rpc/api/src/lib.rs
+++ b/network-rpc/api/src/lib.rs
@@ -21,6 +21,7 @@ use starcoin_types::block::{Block, BlockHeader, BlockInfo, BlockNumber};
 use starcoin_types::transaction::{SignedUserTransaction, Transaction, TransactionInfo};
 use starcoin_vm_types::state_store::table::TableInfo;
 
+pub mod dag_protocol;
 mod remote_chain_state;
 
 pub use network_p2p_core::RawRpcClient;
@@ -288,6 +289,21 @@ pub trait NetworkRpc: Sized + Send + Sync + 'static {
         request: GetStateWithTableItemProof,
     ) -> BoxFuture<Result<StateWithTableItemProof>>;
 
+    fn get_dag_accumulator_leaves(
+        &self,
+        peer_id: PeerId,
+        req: dag_protocol::GetDagAccumulatorLeaves,
+    ) -> BoxFuture<Result<Vec<dag_protocol::TargetDagAccumulatorLeaf>>>;
+    fn get_accumulator_leaf_detail(
+        &self,
+        peer_id: PeerId,
+        req: dag_protocol::GetTargetDagAccumulatorLeafDetail,
+    ) -> BoxFuture<Result<Option<Vec<dag_protocol::TargetDagAccumulatorLeafDetail>>>>;
+    fn get_dag_block_info(
+        &self,
+        peer_id: PeerId,
+        req: dag_protocol::GetSyncDagBlockInfo,
+    ) -> BoxFuture<Result<Option<Vec<dag_protocol::SyncDagBlockInfo>>>>;
     fn get_state_table_info(
         &self,
         peer_id: PeerId,
diff --git a/network-rpc/src/rpc.rs b/network-rpc/src/rpc.rs
index c333341a44..f51a1dc261 100644
--- a/network-rpc/src/rpc.rs
+++ b/network-rpc/src/rpc.rs
@@ -10,10 +10,11 @@ use starcoin_accumulator::AccumulatorNode;
 use starcoin_chain_service::{ChainAsyncService, ChainReaderService};
 use starcoin_crypto::HashValue;
 use starcoin_network_rpc_api::{
-    gen_server, BlockBody, GetAccountState, GetAccumulatorNodeByNodeHash, GetBlockHeadersByNumber,
-    GetBlockIds, GetStateWithProof, GetStateWithTableItemProof, GetTableInfo, GetTxnsWithHash,
-    GetTxnsWithSize, Ping, RpcRequest, MAX_BLOCK_HEADER_REQUEST_SIZE, MAX_BLOCK_INFO_REQUEST_SIZE,
-    MAX_BLOCK_REQUEST_SIZE, MAX_TXN_REQUEST_SIZE,
+    dag_protocol, gen_server, BlockBody, GetAccountState, GetAccumulatorNodeByNodeHash,
+    GetBlockHeadersByNumber, GetBlockIds, GetStateWithProof, GetStateWithTableItemProof,
+    GetTableInfo, GetTxnsWithHash, GetTxnsWithSize, Ping, RpcRequest,
+    MAX_BLOCK_HEADER_REQUEST_SIZE, MAX_BLOCK_INFO_REQUEST_SIZE, MAX_BLOCK_REQUEST_SIZE,
+    MAX_TXN_REQUEST_SIZE,
 };
 use starcoin_service_registry::ServiceRef;
 use starcoin_state_api::{ChainStateAsyncService, StateWithProof, StateWithTableItemProof};
@@ -196,7 +197,7 @@ impl gen_server::NetworkRpc for NetworkRpcImpl {
             let blocks = chain_reader.get_blocks(hashes).await?;
             let mut bodies = vec![];
             for block in blocks {
-                bodies.push(block.map(|block| block.body));
+                bodies.push(block.map(|(block, _, _)| block.body));
             }
             Ok(bodies)
         };
@@ -303,7 +304,7 @@ impl gen_server::NetworkRpc for NetworkRpcImpl {
         &self,
         _peer_id: PeerId,
         ids: Vec<HashValue>,
-    ) -> BoxFuture<Result<Vec<Option<Block>>>> {
+    ) -> BoxFuture<Result<Vec<Option<(Block, Option<Vec<HashValue>>, Option<HashValue>)>>>> {
         let chain_service = self.chain_service.clone();
         let fut = async move {
             if ids.len() as u64 > MAX_BLOCK_REQUEST_SIZE {
@@ -317,4 +318,32 @@
         };
         Box::pin(fut)
     }
+
+    fn get_dag_accumulator_leaves(
+        &self,
+        _peer_id: PeerId,
+        req: dag_protocol::GetDagAccumulatorLeaves,
+    ) -> BoxFuture<Result<Vec<dag_protocol::TargetDagAccumulatorLeaf>>> {
+        let chain_service = self.chain_service.clone();
+        let fut = async move { chain_service.get_dag_accumulator_leaves(req).await };
+        Box::pin(fut)
+    }
+
+    fn get_accumulator_leaf_detail(
+        &self,
+        _peer_id: PeerId,
+        req: dag_protocol::GetTargetDagAccumulatorLeafDetail,
+    ) -> BoxFuture<Result<Option<Vec<dag_protocol::TargetDagAccumulatorLeafDetail>>>> {
+        let chain_service = self.chain_service.clone();
+        let fut = async move { chain_service.get_dag_accumulator_leaves_detail(req).await };
+        Box::pin(fut)
+    }
+
+    fn get_dag_block_info(
+        &self,
+        _peer_id: PeerId,
+        _req: dag_protocol::GetSyncDagBlockInfo,
+    ) -> BoxFuture<Result<Option<Vec<dag_protocol::SyncDagBlockInfo>>>> {
+        todo!()
+    }
 }
diff --git a/network/api/src/messages.rs b/network/api/src/messages.rs
index 046fb58e77..0e0de5351b 100644
--- a/network/api/src/messages.rs
+++ b/network/api/src/messages.rs
@@ -51,7 +51,11 @@ pub struct CompactBlockMessage {
 }
 
 impl CompactBlockMessage {
-    pub fn new(compact_block: CompactBlock, block_info: BlockInfo) -> Self {
+    pub fn new(
+        compact_block: CompactBlock,
+        block_info: BlockInfo,
+        tips_hash: Option<Vec<HashValue>>,
+    ) -> Self {
         Self {
             compact_block,
             block_info,
@@ -61,7 +65,7 @@
 
 impl Sample for CompactBlockMessage {
     fn sample() -> Self {
-        Self::new(CompactBlock::sample(), BlockInfo::sample())
+        Self::new(CompactBlock::sample(), BlockInfo::sample(), None)
     }
 }
diff --git a/network/api/src/peer_provider.rs b/network/api/src/peer_provider.rs
index 0987895bbf..fda14bba1e 100644
--- a/network/api/src/peer_provider.rs
+++ b/network/api/src/peer_provider.rs
@@ -314,6 +314,14 @@ impl PeerSelector {
         }
     }
 
+    pub fn peer_infos(&self) -> Vec<PeerInfo> {
+        self.details
+            .lock()
+            .iter()
+            .map(|peer| peer.peer_info.clone())
+            .collect()
+    }
+
     pub fn peers(&self) -> Vec<PeerId> {
         self.details
             .lock()
diff --git a/network/api/src/tests.rs b/network/api/src/tests.rs
index 801277064e..5264d236b9 100644
--- a/network/api/src/tests.rs
+++ b/network/api/src/tests.rs
@@ -3,10 +3,12 @@
 
 use crate::peer_provider::{PeerSelector, PeerStrategy};
 use crate::peer_score::{InverseScore, Score};
+use bcs_ext::Sample;
 use network_p2p_types::peer_id::PeerId;
 use network_types::peer_info::PeerInfo;
 use starcoin_crypto::HashValue;
 use starcoin_logger::prelude::*;
+use starcoin_types::dag_block::AccumulatorInfo;
 use starcoin_types::startup_info::{ChainInfo, ChainStatus};
 use starcoin_types::U256;
 
@@ -34,28 +36,52 @@ fn test_peer_selector() {
     let peers = vec![
         PeerInfo::new(
             PeerId::random(),
-            ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())),
+            ChainInfo::new(
+                1.into(),
+                HashValue::zero(),
+                mock_chain_status(100.into()),
+                None,
+                None,
+            ),
             vec![],
             vec![],
             None,
         ),
         PeerInfo::new(
             PeerId::random(),
-            ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(99.into())),
+            ChainInfo::new(
+                1.into(),
+                HashValue::zero(),
+                mock_chain_status(99.into()),
+                None,
+                None,
+            ),
             vec![],
             vec![],
             None,
        ),
         PeerInfo::new(
             PeerId::random(),
-            ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(100.into())),
+            ChainInfo::new(
+                1.into(),
+                HashValue::zero(),
+                mock_chain_status(100.into()),
+                None,
+                None,
+            ),
             vec![],
             vec![],
             None,
         ),
         PeerInfo::new(
             PeerId::random(),
-            ChainInfo::new(1.into(), HashValue::zero(), mock_chain_status(1.into())),
+            ChainInfo::new(
+                1.into(),
+                HashValue::zero(),
+                mock_chain_status(1.into()),
+                None,
+                None,
+            ),
             vec![],
             vec![],
             None,
diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs
index 0c58124c82..bfb5458f22 100644
--- a/network/src/network_p2p_handle.rs
+++ b/network/src/network_p2p_handle.rs
@@ -13,7 +13,7 @@ use serde::{Deserialize, Serialize};
 use starcoin_types::startup_info::{ChainInfo, ChainStatus};
 
 /// Current protocol version.
-pub(crate) const CURRENT_VERSION: u32 = 5;
+pub(crate) const CURRENT_VERSION: u32 = 6;
 /// Lowest version we support
 pub(crate) const MIN_VERSION: u32 = 3;
@@ -123,9 +123,9 @@ impl BusinessLayerHandle for Networkp2pHandle {
     }
 
     fn update_status(&mut self, peer_status: &[u8]) -> Result<(), anyhow::Error> {
-        match ChainStatus::decode(peer_status) {
-            std::result::Result::Ok(status) => {
-                self.status.info.update_status(status);
+        match ChainInfo::decode(peer_status) {
+            std::result::Result::Ok(chain_info) => {
+                self.status.info = chain_info;
                 Ok(())
             }
             Err(error) => {
diff --git a/network/src/service.rs b/network/src/service.rs
index e4478f5a03..15985eea9f 100644
--- a/network/src/service.rs
+++ b/network/src/service.rs
@@ -56,7 +56,7 @@ impl NetworkActor for NetworkActorService {}
 
 impl NetworkActorService {
     pub fn new<H>(
         config: Arc<NodeConfig>,
-        chain_info: ChainInfo,
+        chain_state_info: ChainInfo,
         rpc: Option<(RpcInfo, ServiceRef<NetworkRpcService>)>,
         peer_message_handler: H,
     ) -> Result<Self>
@@ -65,7 +65,7 @@ impl NetworkActorService {
     {
         let (self_info, worker) = build_network_worker(
             &config.network,
-            chain_info,
+            chain_state_info,
             config.network.supported_network_protocols(),
             rpc,
             config.metrics.registry().cloned(),
@@ -484,11 +484,14 @@ impl Inner {
     }
 
     pub(crate) fn update_chain_status(&mut self, sync_status: SyncStatus) {
-        let chain_status = sync_status.chain_status().clone();
+        let chain_status: ChainStatus = sync_status.chain_status().clone();
         self.self_peer
             .peer_info
             .update_chain_status(chain_status.clone());
-        match chain_status.encode() {
+        self.self_peer
+            .peer_info
+            .update_dag_accumulator_info(sync_status.dag_accumulator_info().clone());
+        match self.self_peer.peer_info.chain_info.encode() {
             Ok(status) => {
                 self.network_service.update_business_status(status);
             }
@@ -554,7 +557,7 @@ impl Inner {
         );
         peer_info.known_blocks.put(block_id, ());
         peer_info.peer_info.update_chain_status(ChainStatus::new(
-            block_header,
+            block_header.clone(),
             compact_block_message.block_info.clone(),
         ));
diff --git a/network/tests/network_node_test.rs b/network/tests/network_node_test.rs
index e17b9e94ae..c70ef5af26 100644
--- a/network/tests/network_node_test.rs
+++ b/network/tests/network_node_test.rs
@@ -35,7 +35,7 @@ fn test_reconnected_peers() -> anyhow::Result<()> {
 
     // stop node2, node1's peers is empty
     node2.stop()?;
-    thread::sleep(Duration::from_secs(3));
+    thread::sleep(Duration::from_secs(12));
     loop {
         let network_state = block_on(async { node1_network.network_state().await })?;
         debug!("network_state: {:?}", network_state);
diff --git a/network/tests/network_service_test.rs b/network/tests/network_service_test.rs
index 0b2ca2958a..897a0c35c9 100644
--- a/network/tests/network_service_test.rs
+++ b/network/tests/network_service_test.rs
@@ -38,6 +38,8 @@ fn build_test_network_services(num: usize) -> Vec {
         BuiltinNetworkID::Test.chain_id(),
         HashValue::random(),
         ChainStatus::random(),
+        None,
+        None,
     );
     for _index in 0..num {
         let mut boot_nodes = Vec::new();
@@ -157,6 +159,7 @@ async fn test_event_notify_receive() {
         CompactBlockMessage::new(
             CompactBlock::new(Block::new(BlockHeader::random(), BlockBody::new_empty())),
             mock_block_info(1.into()),
+            Some(vec![HashValue::zero()]),
         ),
     );
     let mut receiver = network2.message_handler.channel();
@@ -173,12 +176,20 @@ async fn test_event_notify_receive_repeat_block() {
 
     let msg_send1 = PeerMessage::new_compact_block(
         network2.peer_id(),
-        CompactBlockMessage::new(CompactBlock::new(block.clone()), mock_block_info(1.into())),
+        CompactBlockMessage::new(
+            CompactBlock::new(block.clone()),
+            mock_block_info(1.into()),
+            Some(vec![HashValue::zero()]),
+        ),
     );
 
     let msg_send2 = PeerMessage::new_compact_block(
         network2.peer_id(),
-        CompactBlockMessage::new(CompactBlock::new(block.clone()), mock_block_info(1.into())),
+        CompactBlockMessage::new(
+            CompactBlock::new(block.clone()),
+            mock_block_info(1.into()),
+            Some(vec![HashValue::zero()]),
+        ),
     );
 
     let mut receiver = network2.message_handler.channel();
@@ -264,6 +275,7 @@ async fn test_event_broadcast() {
             CompactBlock::new(block.clone()),
             //difficulty should > genesis block difficulty.
             mock_block_info(10.into()),
+            Some(vec![HashValue::zero()]),
         )));
 
     node1.service_ref.broadcast(notification.clone());
diff --git a/network/types/src/peer_info.rs b/network/types/src/peer_info.rs
index 1ab7bdd70e..13b0463afb 100644
--- a/network/types/src/peer_info.rs
+++ b/network/types/src/peer_info.rs
@@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize};
 use starcoin_crypto::HashValue;
 use starcoin_types::block::BlockHeader;
 use starcoin_types::block::BlockNumber;
+use starcoin_types::dag_block::AccumulatorInfo;
 use starcoin_types::startup_info::{ChainInfo, ChainStatus};
 use starcoin_types::U256;
 use std::borrow::Cow;
@@ -67,6 +68,11 @@ impl PeerInfo {
         self.chain_info.update_status(chain_status)
     }
 
+    pub fn update_dag_accumulator_info(&mut self, dag_accumulator_info: Option<AccumulatorInfo>) {
+        self.chain_info
+            .update_dag_accumulator_info(dag_accumulator_info)
+    }
+
     /// This peer is support notification
     pub fn is_support_notification(&self) -> bool {
         !self.notif_protocols.is_empty()
diff --git a/node/Cargo.toml b/node/Cargo.toml
index cc9c4797fa..f8d94cc883 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -47,6 +47,7 @@ starcoin-vm-runtime = { workspace = true }
 thiserror = { workspace = true }
 timeout-join-handler = { workspace = true }
 tokio = { features = ["full"], workspace = true }
+starcoin-accumulator = { workspace = true }
 num_cpus = { workspace = true }
 
 [dev-dependencies]
diff --git a/node/src/network_service_factory.rs b/node/src/network_service_factory.rs
index 173ee9295b..aa381d5d11 100644
--- a/node/src/network_service_factory.rs
+++ b/node/src/network_service_factory.rs
@@ -28,7 +28,7 @@ impl ServiceFactory for NetworkServiceFactory {
             NodePeerMessageHandler::new(txpool_service, block_relayer, announcement_service);
 
         let chain_info = storage
-            .get_chain_info()?
+            .get_chain_info(config.net().id().clone())?
.ok_or_else(|| format_err!("Can not get chain info."))?; let actor_service = NetworkActorService::new( config, diff --git a/node/src/node.rs b/node/src/node.rs index fd3e7fcf77..23d09bb621 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -13,10 +13,13 @@ use futures::executor::block_on; use futures_timer::Delay; use network_api::{PeerProvider, PeerSelector, PeerStrategy}; use starcoin_account_service::{AccountEventService, AccountService, AccountStorage}; +use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_block_relayer::BlockRelayer; use starcoin_chain_notify::ChainNotifyHandlerService; use starcoin_chain_service::ChainReaderService; use starcoin_config::NodeConfig; +use starcoin_consensus::{BlockDAG, FlexiDagStorage, FlexiDagStorageConfig}; +use starcoin_crypto::HashValue; use starcoin_genesis::{Genesis, GenesisError}; use starcoin_logger::prelude::*; use starcoin_logger::structured_log::init_slog_logger; @@ -43,7 +46,7 @@ use starcoin_storage::db_storage::DBStorage; use starcoin_storage::errors::StorageInitError; use starcoin_storage::metrics::StorageMetrics; use starcoin_storage::storage::StorageInstance; -use starcoin_storage::{BlockStore, Storage}; +use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_stratum::service::{StratumService, StratumServiceFactory}; use starcoin_stratum::stratum::{Stratum, StratumFactory}; use starcoin_sync::announcement::AnnouncementService; @@ -51,10 +54,12 @@ use starcoin_sync::block_connector::{BlockConnectorService, ExecuteRequest, Rese use starcoin_sync::sync::SyncService; use starcoin_sync::txn_sync::TxnSyncService; use starcoin_sync::verified_rpc_client::VerifiedRpcClient; -use starcoin_txpool::TxPoolActorService; +use starcoin_txpool::{TxPoolActorService, TxPoolService}; +use starcoin_types::blockhash::ORIGIN; +use starcoin_types::header::DagHeader; use starcoin_types::system_events::{SystemShutdown, SystemStarted}; use starcoin_vm_runtime::metrics::VMMetrics; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::{Duration, SystemTime}; pub struct NodeService { @@ -133,10 +138,34 @@ impl ServiceHandler for NodeService { .start_service_sync(GenerateBlockEventPacemaker::service_name()), ), NodeRequest::ResetNode(block_hash) => { - let connect_service = ctx.service_ref::()?.clone(); + let connect_service = ctx + .service_ref::>()? + .clone(); + let dag = ctx + .get_shared::>() + .expect("ghost dag object does not exits"); + let parents = match dag.get_parents(block_hash) { + Ok(parents) => { + if parents.is_empty() { + None + } else { + Some(parents) + } + } + Err(error) => { + error!("Get parents error: {:?}", error); + None + } + }; + let fut = async move { info!("Prepare to reset node startup info to {}", block_hash); - connect_service.send(ResetRequest { block_hash }).await? + connect_service + .send(ResetRequest { + block_hash, + dag_block_parent: parents, + }) + .await? }; let receiver = ctx.exec(fut); NodeResponse::AsyncResult(receiver) @@ -147,8 +176,27 @@ impl ServiceHandler for NodeService { .get_shared_sync::>() .expect("Storage must exist."); - let connect_service = ctx.service_ref::()?.clone(); + let connect_service = ctx + .service_ref::>()? 
+ .clone(); let network = ctx.get_shared::()?; + let dag = ctx + .get_shared::>() + .expect("ghost dag object does not exits"); + let parents = match dag.get_parents(block_hash) { + Ok(parents) => { + if parents.is_empty() { + None + } else { + Some(parents) + } + } + Err(error) => { + error!("Get parents error: {:?}", error); + None + } + }; + // let dag_transaction_parent = storage.get_accumulator_store(AccumulatorStoreType::Block).?; let fut = async move { info!("Prepare to re execute block {}", block_hash); let block = match storage.get_block(block_hash)? { @@ -166,7 +214,7 @@ impl ServiceHandler for NodeService { peer_selector.retain_rpc_peers(); let rpc_client = VerifiedRpcClient::new(peer_selector, network); let mut blocks = rpc_client.get_blocks(vec![block_hash]).await?; - blocks.pop().flatten().map(|(block, _peer)| block) + blocks.pop().flatten().map(|(block, _peer, _, _)| block) } } }; @@ -311,9 +359,15 @@ impl NodeService { let upgrade_time = SystemTime::now().duration_since(start_time)?; let storage = Arc::new(Storage::new(storage_instance)?); registry.put_shared(storage.clone()).await?; + let (chain_info, genesis) = Genesis::init_and_check_storage(config.net(), storage.clone(), config.data_dir())?; + match BlockDAG::init_with_storage(storage.clone(), config.clone())? { + Some(dag) => registry.put_shared(Arc::new(dag)).await?, + None => info!("dag will be initialized later when the height of the chain reaches the specific one"), + } + info!( "Start node with chain info: {}, number {} upgrade_time cost {} secs, ", chain_info, @@ -347,7 +401,9 @@ impl NodeService { registry.register::().await?; - registry.register::().await?; + registry + .register::>() + .await?; registry.register::().await?; let block_relayer = registry.register::().await?; diff --git a/rpc/api/src/types.rs b/rpc/api/src/types.rs index 532a140998..523be0cb14 100644 --- a/rpc/api/src/types.rs +++ b/rpc/api/src/types.rs @@ -24,7 +24,7 @@ use starcoin_resource_viewer::{AnnotatedMoveStruct, AnnotatedMoveValue}; use starcoin_service_registry::ServiceRequest; use starcoin_state_api::{StateProof, StateWithProof, StateWithTableItemProof}; use starcoin_types::block::{ - Block, BlockBody, BlockHeader, BlockHeaderExtra, BlockInfo, BlockNumber, + Block, BlockBody, BlockHeader, BlockHeaderExtra, BlockInfo, BlockNumber, ParentsHash, }; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; use starcoin_types::event::EventKey; @@ -433,6 +433,8 @@ pub struct BlockHeaderView { pub nonce: u32, /// block header extra pub extra: BlockHeaderExtra, + /// block parents + pub parents_hash: ParentsHash, } impl From for BlockHeaderView { @@ -453,6 +455,7 @@ impl From for BlockHeaderView { chain_id: origin.chain_id().id(), nonce: origin.nonce(), extra: *origin.extra(), + parents_hash: origin.parents_hash(), } } } @@ -473,6 +476,7 @@ impl From for BlockHeader { genesis_config::ChainId::new(header_view.chain_id), header_view.nonce, header_view.extra, + header_view.parents_hash, ) } } diff --git a/rpc/server/src/module/chain_rpc.rs b/rpc/server/src/module/chain_rpc.rs index 3544155169..db62630e2e 100644 --- a/rpc/server/src/module/chain_rpc.rs +++ b/rpc/server/src/module/chain_rpc.rs @@ -73,7 +73,7 @@ where let fut = async move { let chain_status = service.main_status().await?; //TODO get chain info from chain service. 
-            Ok(ChainInfo::new(chain_id, genesis_hash, chain_status).into())
+            Ok(ChainInfo::new(chain_id, genesis_hash, chain_status, None, None).into())
         };
         Box::pin(fut.boxed().map_err(map_err))
     }
diff --git a/rpc/server/src/module/pubsub/tests.rs b/rpc/server/src/module/pubsub/tests.rs
index fc5d74cc7d..587b8eaa43 100644
--- a/rpc/server/src/module/pubsub/tests.rs
+++ b/rpc/server/src/module/pubsub/tests.rs
@@ -38,7 +38,13 @@ pub async fn test_subscribe_to_events() -> Result<()> {
     test_helper::start_txpool_with_miner(1000, true).await;
     let startup_info = storage.get_startup_info()?.unwrap();
     let net = config.net();
-    let mut block_chain = BlockChain::new(net.time_service(), startup_info.main, storage, None)?;
+    let mut block_chain = BlockChain::new(
+        net.time_service(),
+        startup_info.main,
+        storage,
+        net.id().clone(),
+        None,
+    )?;
     let miner_account = AccountInfo::random();
 
     let pri_key = Ed25519PrivateKey::genesis();
@@ -66,7 +72,7 @@
     let new_block = block_chain
         .consensus()
         .create_block(block_template, net.time_service().as_ref())?;
-    let executed_block = block_chain.apply(new_block.clone())?;
+    let executed_block = block_chain.apply(new_block.clone(), None)?;
 
     let reader = block_chain.chain_state_reader();
     let balance = reader.get_balance(account_address)?;
@@ -109,7 +115,7 @@
     // send block
     let block_detail = Arc::new(executed_block);
-    bus.broadcast(NewHeadBlock(block_detail, None, None))?;
+    bus.broadcast(NewHeadBlock(block_detail, None, None))?;
 
     let mut receiver = receiver;
diff --git a/state/service/src/service.rs b/state/service/src/service.rs
index f54738a1e8..7c033860aa 100644
--- a/state/service/src/service.rs
+++ b/state/service/src/service.rs
@@ -131,7 +131,7 @@ impl ServiceHandler for ChainStateService {
 
 impl EventHandler<Self, NewHeadBlock> for ChainStateService {
     fn handle_event(&mut self, msg: NewHeadBlock, _ctx: &mut ServiceContext<Self>) {
-        let NewHeadBlock(block) = msg;
+        let NewHeadBlock(block, _dag_parents) = msg;
         let state_root = block.header().state_root();
         debug!("ChainStateActor change StateRoot to : {:?}", state_root);
diff --git a/storage/src/accumulator/mod.rs b/storage/src/accumulator/mod.rs
index fbbb6bc37e..594e5681d1 100644
--- a/storage/src/accumulator/mod.rs
+++ b/storage/src/accumulator/mod.rs
@@ -1,9 +1,9 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::define_storage;
 use crate::storage::{CodecKVStore, ValueCodec};
 use crate::StorageInstance;
+use crate::{define_storage, SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME};
 use crate::{BLOCK_ACCUMULATOR_NODE_PREFIX_NAME, TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME};
 use anyhow::Result;
 use bcs_ext::BCSCodec;
@@ -24,6 +24,13 @@ define_storage!(
     TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME
 );
 
+define_storage!(
+    DagBlockAccumulatorStorage,
+    HashValue,
+    AccumulatorNode,
+    SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME
+);
+
 impl ValueCodec for AccumulatorNode {
     fn encode_value(&self) -> Result<Vec<u8>> {
         self.encode()
@@ -62,6 +69,16 @@ impl AccumulatorStorage {
     }
 }
 
+impl AccumulatorStorage<DagBlockAccumulatorStorage> {
+    pub fn new_dag_block_accumulator_storage(
+        instance: StorageInstance,
+    ) -> AccumulatorStorage<DagBlockAccumulatorStorage> {
+        Self {
+            store: DagBlockAccumulatorStorage::new(instance),
+        }
+    }
+}
+
 impl<S> AccumulatorTreeStore for AccumulatorStorage<S>
 where
     S: CodecKVStore<HashValue, AccumulatorNode>,
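+
+// Editor's note: an illustrative note added for clarity; it is not part of
+// the original patch. The new dag accumulator store is wired exactly like
+// the existing block and transaction accumulator stores, e.g.
+// (hypothetical usage):
+//
+//     let store = AccumulatorStorage::new_dag_block_accumulator_storage(instance);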
diff --git a/storage/src/batch/mod.rs b/storage/src/batch/mod.rs
index 60e463274e..562ed71ae1 100644
--- a/storage/src/batch/mod.rs
+++ b/storage/src/batch/mod.rs
@@ -5,29 +5,31 @@
 use crate::storage::{CodecWriteBatch, KeyCodec, ValueCodec, WriteOp};
 use anyhow::Result;
 use std::convert::TryFrom;
 
+pub type WriteBatch = GWriteBatch<Vec<u8>, Vec<u8>>;
+
 #[derive(Debug, Default, Clone)]
-pub struct WriteBatch {
-    pub rows: Vec<(Vec<u8>, WriteOp<Vec<u8>>)>,
+pub struct GWriteBatch<K, V> {
+    pub rows: Vec<(K, WriteOp<V>)>,
 }
 
-impl WriteBatch {
+impl<K, V> GWriteBatch<K, V> {
     /// Creates an empty batch.
     pub fn new() -> Self {
         Self::default()
     }
 
-    pub fn new_with_rows(rows: Vec<(Vec<u8>, WriteOp<Vec<u8>>)>) -> Self {
+    pub fn new_with_rows(rows: Vec<(K, WriteOp<V>)>) -> Self {
         Self { rows }
     }
 
     /// Adds an insert/update operation to the batch.
-    pub fn put(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<()> {
+    pub fn put(&mut self, key: K, value: V) -> Result<()> {
         self.rows.push((key, WriteOp::Value(value)));
         Ok(())
     }
 
     /// Adds a delete operation to the batch.
-    pub fn delete(&mut self, key: Vec<u8>) -> Result<()> {
+    pub fn delete(&mut self, key: K) -> Result<()> {
         self.rows.push((key, WriteOp::Deletion));
         Ok(())
     }
diff --git a/storage/src/block/mod.rs b/storage/src/block/mod.rs
index 9b2f162ba6..580550a9ee 100644
--- a/storage/src/block/mod.rs
+++ b/storage/src/block/mod.rs
@@ -1,7 +1,7 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::define_storage;
 use crate::storage::{CodecKVStore, StorageInstance, ValueCodec};
+use crate::{define_storage, BLOCK_TIPS_HEADER_PREFIX_NAME};
 use crate::{
     BLOCK_BODY_PREFIX_NAME, BLOCK_HEADER_PREFIX_NAME, BLOCK_PREFIX_NAME,
     BLOCK_TRANSACTIONS_PREFIX_NAME, BLOCK_TRANSACTION_INFOS_PREFIX_NAME, FAILED_BLOCK_PREFIX_NAME,
@@ -82,6 +82,14 @@ define_storage!(
     BlockHeader,
     BLOCK_HEADER_PREFIX_NAME
 );
+
+define_storage!(
+    BlockTipsHeaderStorage,
+    HashValue,
+    Vec<HashValue>,
+    BLOCK_TIPS_HEADER_PREFIX_NAME
+);
+
 define_storage!(
     BlockBodyStorage,
     HashValue,
@@ -111,6 +119,7 @@ define_storage!(
 pub struct BlockStorage {
     block_store: BlockInnerStorage,
     pub(crate) header_store: BlockHeaderStorage,
+    pub(crate) tips_header_store: BlockTipsHeaderStorage,
     body_store: BlockBodyStorage,
     block_txns_store: BlockTransactionsStorage,
     block_txn_infos_store: BlockTransactionInfosStorage,
@@ -137,6 +146,16 @@ impl ValueCodec for BlockHeader {
     }
 }
 
+impl ValueCodec for Vec<HashValue> {
+    fn encode_value(&self) -> Result<Vec<u8>> {
+        self.encode()
+    }
+
+    fn decode_value(data: &[u8]) -> Result<Self> {
+        Self::decode(data)
+    }
+}
+
 impl ValueCodec for BlockBody {
     fn encode_value(&self) -> Result<Vec<u8>> {
         self.encode()
@@ -171,6 +190,7 @@ impl BlockStorage {
         BlockStorage {
             block_store: BlockInnerStorage::new(instance.clone()),
             header_store: BlockHeaderStorage::new(instance.clone()),
+            tips_header_store: BlockTipsHeaderStorage::new(instance.clone()),
             body_store: BlockBodyStorage::new(instance.clone()),
             block_txns_store: BlockTransactionsStorage::new(instance.clone()),
             block_txn_infos_store: BlockTransactionInfosStorage::new(instance.clone()),
@@ -236,6 +256,13 @@
         self.header_store.get(block_id)
     }
 
+    pub fn get_block_tips_header_by_hash(
+        &self,
+        block_id: HashValue,
+    ) -> Result<Option<Vec<HashValue>>> {
+        self.tips_header_store.get(block_id)
+    }
+
     pub fn get_block_by_hash(&self, block_id: HashValue) -> Result<Option<Block>> {
         self.get(block_id)
     }
diff --git a/storage/src/cache_storage/mod.rs b/storage/src/cache_storage/mod.rs
index 46001ba401..596fbd181d 100644
--- a/storage/src/cache_storage/mod.rs
+++ b/storage/src/cache_storage/mod.rs
@@ -1,34 +1,44 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::batch::WriteBatch;
-use crate::metrics::{record_metrics, StorageMetrics};
-use crate::storage::{InnerStore, WriteOp};
+use crate::batch::GWriteBatch;
+use crate::{
+    batch::WriteBatch,
+    metrics::{record_metrics, StorageMetrics},
+    storage::{InnerStore, WriteOp},
+};
 use anyhow::{Error, Result};
+use core::hash::Hash;
 use lru::LruCache;
 use parking_lot::Mutex;
 use starcoin_config::DEFAULT_CACHE_SIZE;
 
-pub struct CacheStorage {
-    cache: Mutex<LruCache<Vec<u8>, Vec<u8>>>,
+pub type CacheStorage = GCacheStorage<Vec<u8>, Vec<u8>>;
+
+pub struct GCacheStorage<K: Hash + Eq, V> {
+    cache: Mutex<LruCache<K, V>>,
     metrics: Option<StorageMetrics>,
 }
 
-impl CacheStorage {
+impl<K: Hash + Eq, V> GCacheStorage<K, V> {
     pub fn new(metrics: Option<StorageMetrics>) -> Self {
-        CacheStorage {
-            cache: Mutex::new(LruCache::new(DEFAULT_CACHE_SIZE)),
+        GCacheStorage {
+            cache: Mutex::new(LruCache::<K, V>::new(DEFAULT_CACHE_SIZE)),
             metrics,
         }
     }
     pub fn new_with_capacity(size: usize, metrics: Option<StorageMetrics>) -> Self {
-        CacheStorage {
-            cache: Mutex::new(LruCache::new(size)),
+        GCacheStorage {
+            cache: Mutex::new(LruCache::<K, V>::new(size)),
             metrics,
         }
     }
+    pub fn remove_all(&self) {
+        self.cache.lock().clear();
+    }
 }
 
-impl Default for CacheStorage {
+impl<K: Hash + Eq, V> Default for GCacheStorage<K, V> {
     fn default() -> Self {
         Self::new(None)
     }
 }
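+
+// Editor's note: an illustrative example added for clarity; it is not part
+// of the original patch. After the generalisation the same code serves any
+// Hash + Eq key type, while CacheStorage stays the Vec<u8>-keyed alias.
+#[cfg(test)]
+mod gcache_demo_tests {
+    use super::*;
+
+    #[test]
+    fn demo_generic_cache_and_batch() {
+        let cache: GCacheStorage<u64, String> = GCacheStorage::default();
+        cache.remove_all(); // clears the underlying LruCache
+
+        let mut batch: GWriteBatch<u64, String> = GWriteBatch::new();
+        batch.put(1, "a".to_string()).unwrap();
+        batch.delete(2).unwrap();
+        assert_eq!(batch.rows.len(), 2);
+    }
+}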
diff --git a/storage/src/cache_storage/mod.rs b/storage/src/cache_storage/mod.rs index 46001ba401..596fbd181d 100644 --- a/storage/src/cache_storage/mod.rs +++ b/storage/src/cache_storage/mod.rs @@ -1,34 +1,44 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0
-use crate::batch::WriteBatch;
-use crate::metrics::{record_metrics, StorageMetrics};
-use crate::storage::{InnerStore, WriteOp};
+use crate::batch::GWriteBatch;
+use crate::{
+    batch::WriteBatch,
+    metrics::{record_metrics, StorageMetrics},
+    storage::{InnerStore, WriteOp},
+};
 use anyhow::{Error, Result};
+use core::hash::Hash;
 use lru::LruCache;
 use parking_lot::Mutex;
 use starcoin_config::DEFAULT_CACHE_SIZE;
-pub struct CacheStorage {
-    cache: Mutex<LruCache<Vec<u8>, Vec<u8>>>,
+
+pub type CacheStorage = GCacheStorage<Vec<u8>, Vec<u8>>;
+
+pub struct GCacheStorage<K: Hash + Eq + Default, V: Default> {
+    cache: Mutex<LruCache<K, V>>,
     metrics: Option<StorageMetrics>,
 }
-impl CacheStorage {
+impl<K: Hash + Eq + Default, V: Default> GCacheStorage<K, V> {
     pub fn new(metrics: Option<StorageMetrics>) -> Self {
-        CacheStorage {
-            cache: Mutex::new(LruCache::new(DEFAULT_CACHE_SIZE)),
+        GCacheStorage {
+            cache: Mutex::new(LruCache::<K, V>::new(DEFAULT_CACHE_SIZE)),
             metrics,
         }
     }
     pub fn new_with_capacity(size: usize, metrics: Option<StorageMetrics>) -> Self {
-        CacheStorage {
-            cache: Mutex::new(LruCache::new(size)),
+        GCacheStorage {
+            cache: Mutex::new(LruCache::<K, V>::new(size)),
            metrics,
         }
     }
+    pub fn remove_all(&self) {
+        self.cache.lock().clear();
+    }
 }
-impl Default for CacheStorage {
+impl<K: Hash + Eq + Default, V: Default> Default for GCacheStorage<K, V> {
     fn default() -> Self {
         Self::new(None)
     }
@@ -36,53 +46,47 @@ impl Default for CacheStorage {
 impl InnerStore for CacheStorage {
     fn get(&self, prefix_name: &str, key: Vec<u8>) -> Result<Option<Vec<u8>>> {
-        record_metrics("cache", prefix_name, "get", self.metrics.as_ref()).call(|| {
-            Ok(self
-                .cache
-                .lock()
-                .get(&compose_key(prefix_name.to_string(), key))
-                .cloned())
-        })
+        let composed_key = compose_key(Some(prefix_name), key);
+        record_metrics("cache", prefix_name, "get", self.metrics.as_ref())
+            .call(|| Ok(self.get_inner(&composed_key)))
     }
     fn put(&self, prefix_name: &str, key: Vec<u8>, value: Vec<u8>) -> Result<()> {
         // remove record_metrics for performance
         // record_metrics add in write_batch to reduce Instant::now system call
-        let mut cache = self.cache.lock();
-        cache.put(compose_key(prefix_name.to_string(), key), value);
+        let composed_key = compose_key(Some(prefix_name), key);
+        let len = self.put_inner(composed_key, value);
         if let Some(metrics) = self.metrics.as_ref() {
-            metrics.cache_items.set(cache.len() as u64);
+            metrics.cache_items.set(len as u64);
         }
         Ok(())
     }
     fn contains_key(&self, prefix_name: &str, key: Vec<u8>) -> Result<bool> {
-        record_metrics("cache", prefix_name, "contains_key", self.metrics.as_ref()).call(|| {
-            Ok(self
-                .cache
-                .lock()
-                .contains(&compose_key(prefix_name.to_string(), key)))
-        })
+        let composed_key = compose_key(Some(prefix_name), key);
+        record_metrics("cache", prefix_name, "contains_key", self.metrics.as_ref())
+            .call(|| Ok(self.contains_key_inner(&composed_key)))
     }
     fn remove(&self, prefix_name: &str, key: Vec<u8>) -> Result<()> {
         // remove record_metrics for performance
         // record_metrics add in write_batch to reduce Instant::now system call
-        let mut cache = self.cache.lock();
-        cache.pop(&compose_key(prefix_name.to_string(), key));
+        let composed_key = compose_key(Some(prefix_name), key);
+        let len = self.remove_inner(&composed_key);
         if let Some(metrics) = self.metrics.as_ref() {
-            metrics.cache_items.set(cache.len() as u64);
+            metrics.cache_items.set(len as u64);
        }
         Ok(())
     }
     fn write_batch(&self, prefix_name: &str, batch: WriteBatch) -> Result<()> {
+        let rows = batch
+            .rows
+            .into_iter()
+            .map(|(k, v)| (compose_key(Some(prefix_name), k), v))
+            .collect();
+        let batch = WriteBatch { rows };
         record_metrics("cache", prefix_name, "write_batch", self.metrics.as_ref()).call(|| {
-            for (key, write_op) in &batch.rows {
-                match write_op {
-                    WriteOp::Value(value) => self.put(prefix_name, key.to_vec(), value.to_vec())?,
-                    WriteOp::Deletion => self.remove(prefix_name, key.to_vec())?,
-                };
-            }
+            self.write_batch_inner(batch);
             Ok(())
         })
     }
@@ -108,22 +112,76 @@ }
     fn multi_get(&self, prefix_name: &str, keys: Vec<Vec<u8>>) -> Result<Vec<Option<Vec<u8>>>> {
+        let composed_keys = keys
+            .into_iter()
+            .map(|k| compose_key(Some(prefix_name), k))
+            .collect::<Vec<_>>();
+        Ok(self.multi_get_inner(composed_keys.as_slice()))
+    }
+}
+
+fn compose_key(prefix_name: Option<&str>, source_key: Vec<u8>) -> Vec<u8> {
+    match prefix_name {
+        Some(prefix_name) => {
+            let temp_vec = prefix_name.as_bytes().to_vec();
+            let mut compose = Vec::with_capacity(temp_vec.len() + source_key.len());
+            compose.extend(temp_vec);
+            compose.extend(source_key);
+            compose
+        }
+        None => source_key,
+    }
+}
+
+impl<K: Hash + Eq + Default, V: Default + Clone> GCacheStorage<K, V> {
+    pub fn get_inner(&self, key: &K) -> Option<V> {
+        self.cache.lock().get(key).cloned()
+    }
+
+    pub fn put_inner(&self, key: K, value: V) -> usize {
+        let mut cache = self.cache.lock();
+        cache.put(key, value);
+        cache.len()
+    }
+
+    pub fn contains_key_inner(&self, key: &K) -> bool {
+        self.cache.lock().contains(key)
+    }
+
+    pub fn remove_inner(&self, key: &K) -> usize {
+        let mut cache = self.cache.lock();
+        cache.pop(key);
+        cache.len()
+    }
+
+    pub fn write_batch_inner(&self, batch: GWriteBatch<K, V>) {
+        for (key, write_op) in batch.rows {
+            match write_op {
+                WriteOp::Value(value) => {
+                    self.put_inner(key, value);
+                }
+                WriteOp::Deletion => {
+                    self.remove_inner(&key);
+                }
+            };
+        }
+    }
+
+    pub fn put_sync_inner(&self, key: K, value: V) -> usize {
+        self.put_inner(key, value)
+    }
+
+    pub fn write_batch_sync_inner(&self, batch: GWriteBatch<K, V>) {
+        self.write_batch_inner(batch)
+    }
+
+    pub fn multi_get_inner(&self, keys: &[K]) -> Vec<Option<V>> {
         let mut cache = self.cache.lock();
         let mut result = vec![];
-        for key in keys.into_iter() {
-            let item = cache
-                .get(&compose_key(prefix_name.to_string(), key))
-                .cloned();
+        for key in keys {
+            let item = cache.get(key).cloned();
            result.push(item);
         }
-        Ok(result)
+        result
     }
 }
-
-fn compose_key(prefix_name: String, source_key: Vec<u8>) -> Vec<u8> {
-    let temp_vec = prefix_name.as_bytes().to_vec();
-    let mut compose = Vec::with_capacity(temp_vec.len() + source_key.len());
-    compose.extend(temp_vec);
-    compose.extend(source_key);
-    compose
-}

diff --git a/storage/src/chain_info/mod.rs b/storage/src/chain_info/mod.rs index 3f193be3f0..c83ce383ff 100644 --- a/storage/src/chain_info/mod.rs +++ b/storage/src/chain_info/mod.rs @@ -28,6 +28,22 @@ impl ChainInfoStorage {
 const STORAGE_VERSION_KEY: &'static str = "storage_version";
 const SNAPSHOT_RANGE_KEY: &'static str = "snapshot_height";
 const BARNARD_HARD_FORK: &'static str = "barnard_hard_fork";
+    const FLEXI_DAG_STARTUP_INFO_KEY: &'static str = "flexi_dag_startup_info";
+
+    pub fn get_flexi_dag_startup_info(&self) -> Result<Option<StartupInfo>> {
+        self.get(Self::FLEXI_DAG_STARTUP_INFO_KEY.as_bytes())
+            .and_then(|bytes| match bytes {
+                Some(bytes) => Ok(Some(bytes.try_into()?)),
+                None => Ok(None),
+            })
+    }
+
+    pub fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()> {
+        self.put_sync(
+            Self::FLEXI_DAG_STARTUP_INFO_KEY.as_bytes().to_vec(),
+            startup_info.try_into()?,
+        )
+    }
 pub fn get_startup_info(&self) -> Result<Option<StartupInfo>> {
 self.get(Self::STARTUP_INFO_KEY.as_bytes())
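Annotation: the cache no longer builds a `String` per lookup; keys are namespaced once by byte-level concatenation of the column-family name and the raw key. Restated standalone for illustration:

```rust
// Standalone restatement of the compose_key helper from the hunk above;
// not part of the patch.
fn compose_key(prefix_name: Option<&str>, source_key: Vec<u8>) -> Vec<u8> {
    match prefix_name {
        Some(prefix) => {
            let mut composed = Vec::with_capacity(prefix.len() + source_key.len());
            composed.extend_from_slice(prefix.as_bytes());
            composed.extend(source_key);
            composed
        }
        None => source_key,
    }
}

fn main() {
    // "block_header" + [0x01] => b"block_header\x01"
    assert_eq!(
        compose_key(Some("block_header"), vec![0x01]),
        b"block_header\x01".to_vec()
    );
    // With no prefix the key passes through untouched.
    assert_eq!(compose_key(None, vec![0x01]), vec![0x01]);
}
```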
diff --git a/storage/src/db_storage/mod.rs b/storage/src/db_storage/mod.rs index 20e6f82dbc..e80a870544 100644 --- a/storage/src/db_storage/mod.rs +++ b/storage/src/db_storage/mod.rs @@ -1,18 +1,20 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0
-use crate::batch::WriteBatch;
-use crate::errors::StorageInitError;
-use crate::metrics::{record_metrics, StorageMetrics};
-use crate::storage::{ColumnFamilyName, InnerStore, KeyCodec, ValueCodec, WriteOp};
-use crate::{StorageVersion, DEFAULT_PREFIX_NAME};
+use crate::{
+    batch::WriteBatch,
+    errors::StorageInitError,
+    metrics::{record_metrics, StorageMetrics},
+    storage::{ColumnFamilyName, InnerStore, KeyCodec, RawDBStorage, ValueCodec, WriteOp},
+    StorageVersion, DEFAULT_PREFIX_NAME,
+};
 use anyhow::{ensure, format_err, Error, Result};
-use rocksdb::{Options, ReadOptions, WriteBatch as DBWriteBatch, WriteOptions, DB};
+use rocksdb::{
+    DBIterator, DBPinnableSlice, IteratorMode, Options, ReadOptions, WriteBatch as DBWriteBatch,
+    WriteOptions, DB,
+};
 use starcoin_config::{check_open_fds_limit, RocksdbConfig};
-use std::collections::HashSet;
-use std::iter;
-use std::marker::PhantomData;
-use std::path::Path;
+use std::{collections::HashSet, iter, marker::PhantomData, path::Path};
 const RES_FDS: u64 = 4096;
@@ -213,6 +215,9 @@ impl DBStorage {
 // write buffer size
 db_opts.set_max_write_buffer_number(5);
 db_opts.set_max_background_jobs(5);
+        if config.parallelism > 1 {
+            db_opts.increase_parallelism(config.parallelism as i32);
+        }
 // cache // let cache = Cache::new_lru_cache(2 * 1024 * 1024 * 1024); // db_opts.set_row_cache(&cache.unwrap());
@@ -235,6 +240,16 @@ )) }
+    pub fn raw_iterator_cf_opt(
+        &self,
+        prefix_name: &str,
+        mode: IteratorMode,
+        readopts: ReadOptions,
+    ) -> Result<DBIterator> {
+        let cf_handle = self.get_cf_handle(prefix_name)?;
+        Ok(self.db.iterator_cf_opt(cf_handle, readopts, mode))
+    }
+
 /// Returns a forward [`SchemaIterator`] on a certain schema.
 pub fn iter<K, V>(&self, prefix_name: &str) -> Result<SchemaIterator<K, V>> where
@@ -460,3 +475,22 @@ impl InnerStore for DBStorage { }) } }
+
+impl RawDBStorage for DBStorage {
+    fn raw_get_pinned_cf<K: AsRef<[u8]>>(
+        &self,
+        prefix: &str,
+        key: K,
+    ) -> Result<Option<DBPinnableSlice>> {
+        let cf = self.get_cf_handle(prefix)?;
+        let res = self
+            .db
+            .get_pinned_cf_opt(cf, key, &ReadOptions::default())?;
+        Ok(res)
+    }
+
+    fn raw_write_batch(&self, batch: DBWriteBatch) -> Result<()> {
+        self.db.write(batch)?;
+        Ok(())
+    }
+}
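Annotation: `RawDBStorage` gives callers a pinned, zero-copy read path and raw batch writes that bypass the codec layer entirely. A sketch, assuming an already-opened `DBStorage` and the default column family:

```rust
// Illustrative only; assumes a DBStorage opened elsewhere and the
// RawDBStorage trait from the hunk above.
use anyhow::Result;
use rocksdb::WriteBatch as DBWriteBatch;
use starcoin_storage::db_storage::DBStorage;
use starcoin_storage::storage::RawDBStorage;
use starcoin_storage::DEFAULT_PREFIX_NAME;

fn raw_roundtrip(db: &DBStorage) -> Result<()> {
    // Write through a raw RocksDB batch (no KeyCodec/ValueCodec involved).
    let mut batch = DBWriteBatch::default();
    batch.put(b"raw-key", b"raw-value");
    db.raw_write_batch(batch)?;

    // Read back without copying the value out of RocksDB's internal buffer.
    if let Some(slice) = db.raw_get_pinned_cf(DEFAULT_PREFIX_NAME, b"raw-key")? {
        assert_eq!(slice.to_vec(), b"raw-value".to_vec());
    }
    Ok(())
}
```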
diff --git a/storage/src/flexi_dag/mod.rs b/storage/src/flexi_dag/mod.rs new file mode 100644 index 0000000000..789dc31fe1 --- /dev/null +++ b/storage/src/flexi_dag/mod.rs @@ -0,0 +1,112 @@
+use std::{
+    collections::{BTreeSet, BinaryHeap},
+    sync::Arc,
+};
+
+use crate::{
+    accumulator::{AccumulatorStorage, DagBlockAccumulatorStorage},
+    define_storage,
+    storage::{CodecKVStore, StorageInstance, ValueCodec},
+    SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME,
+};
+use anyhow::Result;
+use bcs_ext::BCSCodec;
+use serde::{Deserialize, Serialize};
+use starcoin_accumulator::accumulator_info::AccumulatorInfo;
+use starcoin_crypto::HashValue;
+use starcoin_types::dag_block::KTotalDifficulty;
+use starcoin_uint::U256;
+
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
+pub struct SyncFlexiDagSnapshot {
+    pub child_hashes: Vec<HashValue>, // child nodes(tips), to get the relationship, use dag's relationship store
+    pub accumulator_info: AccumulatorInfo,
+    pub head_block_id: HashValue, // to initialize the BlockInfo
+    pub k_total_difficulties: BTreeSet<KTotalDifficulty>, // the k-th smallest total difficulty
+}
+
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
+pub struct SyncFlexiDagSnapshotHasher {
+    pub child_hashes: Vec<HashValue>, // child nodes(tips), to get the relationship, use dag's relationship store
+    pub head_block_id: HashValue, // to initialize the BlockInfo
+    pub k_total_difficulties: BTreeSet<KTotalDifficulty>, // the k-th smallest total difficulty
+}
+
+impl SyncFlexiDagSnapshotHasher {
+    pub fn to_snapshot(self, accumulator_info: AccumulatorInfo) -> SyncFlexiDagSnapshot {
+        SyncFlexiDagSnapshot {
+            child_hashes: self.child_hashes,
+            accumulator_info,
+            head_block_id: self.head_block_id,
+            k_total_difficulties: self.k_total_difficulties,
+        }
+    }
+}
+
+impl From<SyncFlexiDagSnapshot> for SyncFlexiDagSnapshotHasher {
+    fn from(mut value: SyncFlexiDagSnapshot) -> Self {
+        value.child_hashes.sort();
+        SyncFlexiDagSnapshotHasher {
+            child_hashes: value.child_hashes,
+            head_block_id: value.head_block_id,
+            k_total_difficulties: value.k_total_difficulties,
+        }
+    }
+}
+
+impl ValueCodec for SyncFlexiDagSnapshot {
+    fn encode_value(&self) -> Result<Vec<u8>> {
+        self.encode()
+    }
+
+    fn decode_value(data: &[u8]) -> Result<Self> {
+        Self::decode(data)
+    }
+}
+
+define_storage!(
+    SyncFlexiDagSnapshotStorage,
+    HashValue, // accumulator leaf node
+    SyncFlexiDagSnapshot,
+    SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME
+);
+
+#[derive(Clone)]
+pub struct SyncFlexiDagStorage {
+    snapshot_storage: Arc<SyncFlexiDagSnapshotStorage>,
+    accumulator_storage: AccumulatorStorage<DagBlockAccumulatorStorage>,
+}
+
+impl SyncFlexiDagStorage {
+    pub fn new(instance: StorageInstance) -> Self {
+        let snapshot_storage = Arc::new(SyncFlexiDagSnapshotStorage::new(instance.clone()));
+        let accumulator_storage =
+            AccumulatorStorage::<DagBlockAccumulatorStorage>::new_dag_block_accumulator_storage(
+                instance,
+            );
+
+        SyncFlexiDagStorage {
+            snapshot_storage,
+            accumulator_storage,
+        }
+    }
+
+    pub fn get_accumulator_storage(&self) -> AccumulatorStorage<DagBlockAccumulatorStorage> {
+        self.accumulator_storage.clone()
+    }
+
+    pub fn get_snapshot_storage(&self) -> Arc<SyncFlexiDagSnapshotStorage> {
+        self.snapshot_storage.clone()
+    }
+
+    pub fn put_hashes(&self, key: HashValue, accumulator_info: SyncFlexiDagSnapshot) -> Result<()> {
+        self.snapshot_storage.put(key, accumulator_info)
+    }
+
+    pub fn get_hashes_by_hash(
+        &self,
+        hash: HashValue,
+    ) -> std::result::Result<Option<SyncFlexiDagSnapshot>, anyhow::Error> {
+        self.snapshot_storage.get(hash)
+    }
+}

diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 0246b6e7f4..3741e78ecb 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,41 +1,59 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0
-use crate::accumulator::{
-    AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage,
+use crate::{
+    accumulator::{AccumulatorStorage, BlockAccumulatorStorage, TransactionAccumulatorStorage},
+    block::BlockStorage,
+    block_info::{BlockInfoStorage, BlockInfoStore},
+    chain_info::ChainInfoStorage,
+    contract_event::ContractEventStorage,
+    state_node::StateStorage,
+    storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance},
 };
-use crate::block::BlockStorage;
-use crate::block_info::{BlockInfoStorage, BlockInfoStore};
-use crate::chain_info::ChainInfoStorage;
-use crate::contract_event::ContractEventStorage;
-use crate::state_node::StateStorage;
-use crate::storage::{CodecKVStore, CodecWriteBatch, ColumnFamilyName, StorageInstance};
-use crate::table_info::{TableInfoStorage, TableInfoStore};
-use crate::transaction::TransactionStorage;
-use crate::transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage};
-use anyhow::{bail, format_err, Error, Result};
+//use crate::table_info::{TableInfoStorage, TableInfoStore};
+use crate::{
+    transaction::TransactionStorage,
+    transaction_info::{TransactionInfoHashStorage, TransactionInfoStorage},
+};
+use anyhow::{anyhow, bail, format_err, Error, Ok, Result};
+use flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage,
SyncFlexiDagStorage}; use network_p2p_types::peer_id::PeerId; use num_enum::{IntoPrimitive, TryFromPrimitive}; use once_cell::sync::Lazy; -use starcoin_accumulator::node::AccumulatorStoreType; -use starcoin_accumulator::AccumulatorTreeStore; +use starcoin_accumulator::{ + accumulator_info::{self, AccumulatorInfo}, + node::AccumulatorStoreType, + AccumulatorTreeStore, MerkleAccumulator, Accumulator, +}; +use starcoin_config::ChainNetworkID; use starcoin_crypto::HashValue; +use starcoin_logger::prelude::info; use starcoin_state_store_api::{StateNode, StateNodeStore}; -use starcoin_types::contract_event::ContractEvent; -use starcoin_types::startup_info::{ChainInfo, ChainStatus, SnapshotRange}; -use starcoin_types::transaction::{RichTransactionInfo, Transaction}; use starcoin_types::{ - block::{Block, BlockBody, BlockHeader, BlockInfo}, - startup_info::StartupInfo, + block::{Block, BlockBody, BlockHeader, BlockInfo, BlockNumber}, + blockhash::ORIGIN, + contract_event::ContractEvent, + dag_block::KTotalDifficulty, + header, + startup_info::{self, ChainInfo, ChainStatus, SnapshotRange, StartupInfo}, + transaction::{RichTransactionInfo, Transaction}, +}; +use starcoin_vm_types::{ + account_address::AccountAddress, + dag_block_metadata, + state_store::table::{TableHandle, TableInfo}, +}; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::{Debug, Display, Formatter}, + sync::Arc, +}; +use table_info::{TableInfoStorage, TableInfoStore}; +use upgrade::{ + BARNARD_FLEXIDAG_FORK_HEIGHT, DEV_FLEXIDAG_FORK_HEIGHT, HALLEY_FLEXIDAG_FORK_HEIGHT, + MAIN_FLEXIDAG_FORK_HEIGHT, PROXIMA_FLEXIDAG_FORK_HEIGHT, TEST_FLEXIDAG_FORK_HEIGHT, }; -//use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; -use starcoin_types::account_address::AccountAddress; -use starcoin_vm_types::state_store::table::{TableHandle, TableInfo}; -use std::collections::BTreeMap; -use std::fmt::{Debug, Display, Formatter}; -use std::sync::Arc; -pub use upgrade::BARNARD_HARD_FORK_HASH; -pub use upgrade::BARNARD_HARD_FORK_HEIGHT; +pub use upgrade::{BARNARD_HARD_FORK_HASH, BARNARD_HARD_FORK_HEIGHT}; pub mod accumulator; pub mod batch; @@ -46,6 +64,7 @@ pub mod chain_info; pub mod contract_event; pub mod db_storage; pub mod errors; +pub mod flexi_dag; pub mod metrics; pub mod state_node; pub mod storage; @@ -64,6 +83,7 @@ pub const BLOCK_ACCUMULATOR_NODE_PREFIX_NAME: ColumnFamilyName = "acc_node_block pub const TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME: ColumnFamilyName = "acc_node_transaction"; pub const BLOCK_PREFIX_NAME: ColumnFamilyName = "block"; pub const BLOCK_HEADER_PREFIX_NAME: ColumnFamilyName = "block_header"; +pub const BLOCK_TIPS_HEADER_PREFIX_NAME: ColumnFamilyName = "block_tips_header"; pub const BLOCK_BODY_PREFIX_NAME: ColumnFamilyName = "block_body"; pub const BLOCK_INFO_PREFIX_NAME: ColumnFamilyName = "block_info"; pub const BLOCK_TRANSACTIONS_PREFIX_NAME: ColumnFamilyName = "block_txns"; @@ -78,6 +98,8 @@ pub const TRANSACTION_INFO_HASH_PREFIX_NAME: ColumnFamilyName = "transaction_inf pub const CONTRACT_EVENT_PREFIX_NAME: ColumnFamilyName = "contract_event"; pub const FAILED_BLOCK_PREFIX_NAME: ColumnFamilyName = "failed_block"; pub const TABLE_INFO_PREFIX_NAME: ColumnFamilyName = "table_info"; +pub const SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME: ColumnFamilyName = "sync_flexi_dag_accumulator"; +pub const SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME: ColumnFamilyName = "sync_flexi_dag_snapshot"; ///db storage use prefix_name vec to init /// Please note that adding a prefix needs to be added in vec simultaneously, 
remember!! @@ -143,17 +165,43 @@
 static VEC_PREFIX_NAME_V3: Lazy<Vec<ColumnFamilyName>> = Lazy::new(|| { TABLE_INFO_PREFIX_NAME, ] });
+
+static VEC_PREFIX_NAME_V4: Lazy<Vec<ColumnFamilyName>> = Lazy::new(|| {
+    vec![
+        BLOCK_ACCUMULATOR_NODE_PREFIX_NAME,
+        TRANSACTION_ACCUMULATOR_NODE_PREFIX_NAME,
+        BLOCK_PREFIX_NAME,
+        BLOCK_HEADER_PREFIX_NAME,
+        BLOCK_BODY_PREFIX_NAME, // unused column
+        BLOCK_INFO_PREFIX_NAME,
+        BLOCK_TRANSACTIONS_PREFIX_NAME,
+        BLOCK_TRANSACTION_INFOS_PREFIX_NAME,
+        STATE_NODE_PREFIX_NAME,
+        CHAIN_INFO_PREFIX_NAME,
+        TRANSACTION_PREFIX_NAME,
+        TRANSACTION_INFO_PREFIX_NAME, // unused column
+        TRANSACTION_INFO_PREFIX_NAME_V2,
+        TRANSACTION_INFO_HASH_PREFIX_NAME,
+        CONTRACT_EVENT_PREFIX_NAME,
+        FAILED_BLOCK_PREFIX_NAME,
+        SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME,
+        SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME,
+        TABLE_INFO_PREFIX_NAME,
+    ]
+});
+
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, IntoPrimitive, TryFromPrimitive)] #[repr(u8)]
 pub enum StorageVersion {
 V1 = 1,
 V2 = 2,
 V3 = 3,
+    V4 = 4,
 }
 impl StorageVersion {
 pub fn current_version() -> StorageVersion {
- StorageVersion::V3
+ StorageVersion::V4
 }
 pub fn get_column_family_names(&self) -> &'static [ColumnFamilyName] { @@ -161,10 +209,18 @@
 StorageVersion::V1 => &VEC_PREFIX_NAME_V1,
 StorageVersion::V2 => &VEC_PREFIX_NAME_V2,
 StorageVersion::V3 => &VEC_PREFIX_NAME_V3,
+            StorageVersion::V4 => &VEC_PREFIX_NAME_V4,
 } } }
+// pub trait DagBlockStore {
+//     fn get_flexi_dag_startup_info(&self) -> Result<Option<StartupInfo>>;
+//     fn save_flexi_dag_startup_info(&self, startup_info: StartupInfo) -> Result<()>;
+//     fn get_dag_accumulator_info(&self) -> Result<AccumulatorInfo>;
+//     fn get_last_tips(&self) -> Result<Option<Vec<HashValue>>>;
+// }
+
 pub trait BlockStore {
 fn get_startup_info(&self) -> Result<Option<StartupInfo>>;
 fn save_startup_info(&self, startup_info: StartupInfo) -> Result<()>;
@@ -173,7 +229,7 @@ pub trait BlockStore {
 fn save_genesis(&self, genesis_hash: HashValue) -> Result<()>;
- fn get_chain_info(&self) -> Result<Option<ChainInfo>>;
+ fn get_chain_info(&self, id: ChainNetworkID) -> Result<Option<ChainInfo>>;
 fn get_block(&self, block_id: HashValue) -> Result<Option<Block>>;
@@ -188,6 +244,11 @@ pub trait BlockStore {
 fn get_block_header_by_hash(&self, block_id: HashValue) -> Result<Option<BlockHeader>>;
+    fn get_block_tips_header_by_hash(
+        &self,
+        block_id: HashValue,
+    ) -> Result<Option<Vec<BlockHeader>>>;
+
 fn get_block_by_hash(&self, block_id: HashValue) -> Result<Option<Block>>;
 fn save_block_transaction_ids( @@ -263,6 +324,24 @@ pub trait TransactionStore {
 fn get_transactions(&self, txn_hash_vec: Vec<HashValue>) -> Result<Vec<Option<Transaction>>>;
 }
+pub trait SyncFlexiDagStore {
+    fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()>;
+    fn query_by_hash(&self, key: HashValue) -> Result<Option<SyncFlexiDagSnapshot>>;
+    fn get_accumulator_snapshot_storage(&self) -> std::sync::Arc<SyncFlexiDagSnapshotStorage>;
+    fn append_dag_accumulator_leaf(
+        &self,
+        key: HashValue,
+        new_tips: Vec<HashValue>,
+        accumulator_info: AccumulatorInfo,
+        head_block_id: HashValue,
+        k_total_difficulties: BTreeSet<KTotalDifficulty>,
+    ) -> Result<()>;
+    fn get_dag_accumulator_info(&self) -> Result<Option<AccumulatorInfo>>;
+    fn get_tips_by_block_id(&self, block_id: HashValue) -> Result<Vec<HashValue>>;
+    fn dag_fork_height(&self, id: ChainNetworkID) -> BlockNumber;
+    fn get_lastest_snapshot(&self) -> Result<Option<SyncFlexiDagSnapshot>>;
+}
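Annotation: a sketch of the new `SyncFlexiDagStore` surface — one accumulator leaf per tip set, keyed by the leaf hash. The accumulator info and difficulty set below are placeholders, not values a real caller would pass:

```rust
// Illustrative only; Storage and SyncFlexiDagStore come from this file,
// the leaf key and tips are made up for the example.
use std::collections::BTreeSet;
use starcoin_accumulator::accumulator_info::AccumulatorInfo;
use starcoin_crypto::HashValue;
use starcoin_storage::{Storage, SyncFlexiDagStore};

fn record_tips(storage: &Storage, leaf_key: HashValue, tips: Vec<HashValue>) -> anyhow::Result<()> {
    let head = *tips.first().expect("at least one tip");
    // An empty accumulator info stands in for the live one a real caller holds.
    let info = AccumulatorInfo::new(HashValue::zero(), vec![], 0, 0);
    storage.append_dag_accumulator_leaf(leaf_key, tips.clone(), info, head, BTreeSet::new())?;
    // Reading the key back returns the tip set that was committed.
    assert_eq!(storage.get_tips_by_block_id(leaf_key)?, tips);
    Ok(())
}
```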
// TODO: remove Arc, we can clone Storage directly.
#[derive(Clone)]
pub struct Storage { @@ -276,6 +355,7 @@
 block_info_storage: BlockInfoStorage,
 event_storage: ContractEventStorage,
 chain_info_storage: ChainInfoStorage,
+    flexi_dag_storage: SyncFlexiDagStorage,
 table_info_storage: TableInfoStorage,
 // instance: StorageInstance,
 }
@@ -296,6 +376,7 @@ impl Storage {
 block_info_storage: BlockInfoStorage::new(instance.clone()),
 event_storage: ContractEventStorage::new(instance.clone()),
 chain_info_storage: ChainInfoStorage::new(instance.clone()),
+            flexi_dag_storage: SyncFlexiDagStorage::new(instance.clone()),
 table_info_storage: TableInfoStorage::new(instance),
 // instance,
 };
@@ -361,7 +442,7 @@ impl BlockStore for Storage {
 self.chain_info_storage.save_genesis(genesis_hash) }
- fn get_chain_info(&self) -> Result<Option<ChainInfo>> {
+ fn get_chain_info(&self, id: ChainNetworkID) -> Result<Option<ChainInfo>> {
 let genesis_hash = match self.get_genesis()? { Some(genesis_hash) => genesis_hash, None => return Ok(None), @@ -376,11 +457,15 @@
 let head_block_info = self.get_block_info(head_block.id())?.ok_or_else(|| { format_err!("Startup block info {:?} should exist", startup_info.main) })?;
- Ok(Some(ChainInfo::new(
+ let snapshot = self
+     .get_lastest_snapshot()?
+     .ok_or_else(|| anyhow!("latest snapshot is none"))?;
+ let chain_info = ChainInfo::new(
 head_block.chain_id(),
 genesis_hash,
- ChainStatus::new(head_block, head_block_info),
- )))
+ ChainStatus::new(head_block.clone(), head_block_info),
+ Some(snapshot.accumulator_info),
+ Some(snapshot.k_total_difficulties),
+ );
+ Ok(Some(chain_info))
 }
 fn get_block(&self, block_id: HashValue) -> Result<Option<Block>> { @@ -408,6 +493,13 @@
 self.block_storage.get_block_header_by_hash(block_id) }
+    fn get_block_tips_header_by_hash(
+        &self,
+        block_id: HashValue,
+    ) -> Result<Option<Vec<BlockHeader>>> {
+        self.block_storage.get_block_tips_header_by_hash(block_id)
+    }
+
 fn get_block_by_hash(&self, block_id: HashValue) -> Result<Option<Block>> {
 self.block_storage.get_block_by_hash(block_id) }
@@ -571,9 +663,103 @@ impl TransactionStore for Storage { } }
+impl SyncFlexiDagStore for Storage {
+    fn put_hashes(&self, key: HashValue, accumulator_snapshot: SyncFlexiDagSnapshot) -> Result<()> {
+        self.flexi_dag_storage.put_hashes(key, accumulator_snapshot)
+    }
+
+    fn query_by_hash(&self, key: HashValue) -> Result<Option<SyncFlexiDagSnapshot>> {
+        self.flexi_dag_storage.get_hashes_by_hash(key)
+    }
+
+    fn get_accumulator_snapshot_storage(&self) -> std::sync::Arc<SyncFlexiDagSnapshotStorage> {
+        self.flexi_dag_storage.get_snapshot_storage()
+    }
+
+    fn get_lastest_snapshot(&self) -> Result<Option<SyncFlexiDagSnapshot>> {
+        let info = self
+            .get_dag_accumulator_info()?
+            .ok_or_else(|| anyhow!("dag startup info is none"))?;
+        let merkle_tree = MerkleAccumulator::new_with_info(
+            info,
+            self.get_accumulator_store(AccumulatorStoreType::SyncDag),
+        );
+        let key = merkle_tree
+            .get_leaf(merkle_tree.num_leaves() - 1)?
+            .ok_or_else(|| anyhow!("failed to get the key since it is none"))?;
+        self.query_by_hash(key)
+    }
+
+    fn get_dag_accumulator_info(&self) -> Result<Option<AccumulatorInfo>> {
+        let startup_info = self.get_startup_info()?;
+        if startup_info.is_none() {
+            return Ok(None);
+        }
+
+        let dag_main = startup_info.unwrap().get_dag_main();
+        if dag_main.is_none() {
+            return Ok(None);
+        }
+
+        let dag_main = dag_main.unwrap();
+
+        Ok(Some(
+            self.flexi_dag_storage
+                .get_snapshot_storage()
+                .get(dag_main)?
+ .expect("snapshot should not be none") + .accumulator_info, + )) + } + + // update dag accumulator + fn append_dag_accumulator_leaf( + &self, + key: HashValue, + new_tips: Vec, + accumulator_info: AccumulatorInfo, + head_block_id: HashValue, + k_total_difficulties: BTreeSet, + ) -> Result<()> { + let snapshot = SyncFlexiDagSnapshot { + child_hashes: new_tips.clone(), + accumulator_info: accumulator_info.clone(), + head_block_id, + k_total_difficulties, + }; + // for sync + if let Some(t) = self.flexi_dag_storage.get_hashes_by_hash(key)? { + if t != snapshot { + panic!("the accumulator differ from other"); + } + } else { + self.flexi_dag_storage.put_hashes(key, snapshot)?; + } + + Ok(()) + } + + fn get_tips_by_block_id(&self, key: HashValue) -> Result> { + match self.query_by_hash(key)? { + Some(snapshot) => Ok(snapshot.child_hashes), + None => { + bail!("failed to get snapshot by hash: {}", key); + } + } + } + + fn dag_fork_height(&self, id: ChainNetworkID) -> BlockNumber { + match id { + ChainNetworkID::Builtin(network_id) => match network_id { + starcoin_config::BuiltinNetworkID::Test => TEST_FLEXIDAG_FORK_HEIGHT, + starcoin_config::BuiltinNetworkID::Dev => DEV_FLEXIDAG_FORK_HEIGHT, + starcoin_config::BuiltinNetworkID::Halley => HALLEY_FLEXIDAG_FORK_HEIGHT, + starcoin_config::BuiltinNetworkID::Proxima => PROXIMA_FLEXIDAG_FORK_HEIGHT, + starcoin_config::BuiltinNetworkID::Barnard => BARNARD_FLEXIDAG_FORK_HEIGHT, + starcoin_config::BuiltinNetworkID::Main => MAIN_FLEXIDAG_FORK_HEIGHT, + }, + ChainNetworkID::Custom(_) => DEV_FLEXIDAG_FORK_HEIGHT, + } + } +} + /// Chain storage define pub trait Store: StateNodeStore + + SyncFlexiDagStore + BlockStore + BlockInfoStore + TransactionStore @@ -653,6 +839,9 @@ impl Store for Storage { AccumulatorStoreType::Transaction => { Arc::new(self.transaction_accumulator_storage.clone()) } + AccumulatorStoreType::SyncDag => { + Arc::new(self.flexi_dag_storage.get_accumulator_storage()) + } } } } diff --git a/storage/src/storage.rs b/storage/src/storage.rs index cddd7269b1..7cc4fe1abe 100644 --- a/storage/src/storage.rs +++ b/storage/src/storage.rs @@ -2,19 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 pub use crate::batch::WriteBatch; -use crate::cache_storage::CacheStorage; -use crate::db_storage::{DBStorage, SchemaIterator}; -use crate::upgrade::DBUpgrade; +use crate::{ + cache_storage::CacheStorage, + db_storage::{DBStorage, SchemaIterator}, + upgrade::DBUpgrade, +}; use anyhow::{bail, format_err, Result}; use byteorder::{BigEndian, ReadBytesExt}; +use rocksdb::{DBPinnableSlice, WriteBatch as DBWriteBatch}; use starcoin_config::NodeConfig; use starcoin_crypto::HashValue; use starcoin_logger::prelude::info; use starcoin_vm_types::state_store::table::TableHandle; -use std::convert::TryInto; -use std::fmt::Debug; -use std::marker::PhantomData; -use std::sync::Arc; +use std::{convert::TryInto, fmt::Debug, marker::PhantomData, sync::Arc}; /// Type alias to improve readability. 
pub type ColumnFamilyName = &'static str; @@ -46,6 +46,16 @@ pub trait InnerStore: Send + Sync { fn multi_get(&self, prefix_name: &str, keys: Vec>) -> Result>>>; } +pub trait RawDBStorage: Send + Sync { + fn raw_get_pinned_cf>( + &self, + prefix: &str, + key: K, + ) -> Result>; + + fn raw_write_batch(&self, batch: DBWriteBatch) -> Result<()>; +} + ///Storage instance type define #[derive(Clone)] #[allow(clippy::upper_case_acronyms)] diff --git a/storage/src/tests/mod.rs b/storage/src/tests/mod.rs index abf8a51ed0..7c781ecc4e 100644 --- a/storage/src/tests/mod.rs +++ b/storage/src/tests/mod.rs @@ -3,4 +3,5 @@ mod test_accumulator; mod test_batch; mod test_block; +mod test_dag; mod test_storage; diff --git a/storage/src/tests/test_block.rs b/storage/src/tests/test_block.rs index 4e663c57b7..0024af03de 100644 --- a/storage/src/tests/test_block.rs +++ b/storage/src/tests/test_block.rs @@ -43,6 +43,7 @@ fn test_block() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); storage .block_storage @@ -102,6 +103,7 @@ fn test_block_number() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); storage .block_storage @@ -149,6 +151,7 @@ fn test_old_failed_block_decode() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); let block_body = BlockBody::new(vec![SignedUserTransaction::mock()], None); @@ -185,6 +188,7 @@ fn test_save_failed_block() { ChainId::test(), 0, BlockHeaderExtra::new([0u8; 4]), + None, ); let block_body = BlockBody::new(vec![SignedUserTransaction::mock()], None); diff --git a/storage/src/tests/test_dag.rs b/storage/src/tests/test_dag.rs new file mode 100644 index 0000000000..159c905ba2 --- /dev/null +++ b/storage/src/tests/test_dag.rs @@ -0,0 +1,347 @@ +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_config::RocksdbConfig; +use starcoin_crypto::HashValue; + +use crate::{ + cache_storage::CacheStorage, db_storage::DBStorage, flexi_dag::SyncFlexiDagSnapshot, + storage::StorageInstance, Storage, Store, SyncFlexiDagStore, +}; +use anyhow::{Ok, Result}; + +trait SyncFlexiDagManager { + fn insert_hashes(&self, hashes: Vec) -> Result; + fn query_by_hash(&self, hash: HashValue) -> Result>; + fn fork(&mut self, accumulator_info: AccumulatorInfo) -> Result<()>; + fn get_hash_by_position(&self, position: u64) -> Result>; + fn get_accumulator_info(&self) -> AccumulatorInfo; +} + +struct SyncFlexiDagManagerImp { + flexi_dag_storage: Box, + accumulator: MerkleAccumulator, +} + +impl SyncFlexiDagManagerImp { + pub fn new() -> Self { + let flexi_dag_storage = Storage::new(StorageInstance::new_cache_and_db_instance( + CacheStorage::default(), + DBStorage::new( + starcoin_config::temp_dir().as_ref(), + RocksdbConfig::default(), + None, + ) + .unwrap(), + )) + .unwrap(); + let accumulator = MerkleAccumulator::new_empty( + flexi_dag_storage + .get_accumulator_store(starcoin_accumulator::node::AccumulatorStoreType::SyncDag), + ); + SyncFlexiDagManagerImp { + flexi_dag_storage: Box::new(flexi_dag_storage), + accumulator, + } + } + + fn hash_for_hashes(mut hashes: Vec) -> HashValue { + hashes.sort(); + HashValue::sha3_256_of(&hashes.into_iter().fold([].to_vec(), |mut collect, hash| { + collect.extend(hash.into_iter()); + collect + })) + } +} + +impl SyncFlexiDagManager for SyncFlexiDagManagerImp { + fn insert_hashes(&self, mut child_hashes: Vec) -> Result { + child_hashes.sort(); + let accumulator_key = Self::hash_for_hashes(child_hashes.clone()); + self.accumulator.append(&[accumulator_key])?; + 
+impl SyncFlexiDagManager for SyncFlexiDagManagerImp {
+    fn insert_hashes(&self, mut child_hashes: Vec<HashValue>) -> Result<HashValue> {
+        child_hashes.sort();
+        let accumulator_key = Self::hash_for_hashes(child_hashes.clone());
+        self.accumulator.append(&[accumulator_key])?;
+        self.flexi_dag_storage.put_hashes(
+            accumulator_key,
+            SyncFlexiDagSnapshot {
+                child_hashes,
+                accumulator_info: self.get_accumulator_info(),
+                // placeholder values: the snapshot struct gained these two
+                // fields in this patch and the test does not exercise them
+                head_block_id: HashValue::zero(),
+                k_total_difficulties: std::collections::BTreeSet::new(),
+            },
+        )?;
+        Ok(accumulator_key)
+    }
+
+    fn query_by_hash(&self, hash: HashValue) -> Result<Option<SyncFlexiDagSnapshot>> {
+        self.flexi_dag_storage.query_by_hash(hash)
+    }
+
+    fn fork(&mut self, accumulator_info: AccumulatorInfo) -> Result<()> {
+        self.accumulator = self.accumulator.fork(Some(accumulator_info));
+        Ok(())
+    }
+
+    fn get_hash_by_position(&self, position: u64) -> Result<Option<HashValue>> {
+        self.accumulator.get_leaf(position)
+    }
+
+    fn get_accumulator_info(&self) -> AccumulatorInfo {
+        self.accumulator.get_info()
+    }
+}
+
+#[test]
+fn test_syn_dag_accumulator_insert_and_find() {
+    let syn_accumulator = SyncFlexiDagManagerImp::new();
+    let genesis = HashValue::sha3_256_of(b"genesis");
+    let b = HashValue::sha3_256_of(b"b");
+    let c = HashValue::sha3_256_of(b"c");
+    let d = HashValue::sha3_256_of(b"d");
+    let e = HashValue::sha3_256_of(b"e");
+    let f = HashValue::sha3_256_of(b"f");
+    let h = HashValue::sha3_256_of(b"h");
+    let i = HashValue::sha3_256_of(b"i");
+    let j = HashValue::sha3_256_of(b"j");
+    let k = HashValue::sha3_256_of(b"k");
+    let l = HashValue::sha3_256_of(b"l");
+    let m = HashValue::sha3_256_of(b"m");
+
+    let genesis_key = syn_accumulator.insert_hashes([genesis].to_vec()).unwrap();
+    let layer1 = syn_accumulator
+        .insert_hashes([b, c, d, e].to_vec())
+        .unwrap();
+    let layer2 = syn_accumulator
+        .insert_hashes([f, h, i, k].to_vec())
+        .unwrap();
+    let layer3 = syn_accumulator
+        .insert_hashes([j, m, k, l].to_vec())
+        .unwrap();
+    let layer4 = syn_accumulator.insert_hashes([j, m, l].to_vec()).unwrap();
+
+    assert_eq!(5, syn_accumulator.get_accumulator_info().get_num_leaves());
+
+    assert_eq!(
+        genesis_key,
+        syn_accumulator.get_hash_by_position(0).unwrap().unwrap()
+    );
+    assert_eq!(
+        layer1,
+        syn_accumulator.get_hash_by_position(1).unwrap().unwrap()
+    );
+    assert_eq!(
+        layer2,
+        syn_accumulator.get_hash_by_position(2).unwrap().unwrap()
+    );
+    assert_eq!(
+        layer3,
+        syn_accumulator.get_hash_by_position(3).unwrap().unwrap()
+    );
+    assert_eq!(
+        layer4,
+        syn_accumulator.get_hash_by_position(4).unwrap().unwrap()
+    );
+
+    assert_eq!(
+        [genesis].to_vec(),
+        syn_accumulator
+            .query_by_hash(syn_accumulator.get_hash_by_position(0).unwrap().unwrap())
+            .unwrap()
+            .unwrap()
+            .child_hashes
+    );
+    assert_eq!(
+        { let mut v = [b, c, d, e].to_vec(); v.sort(); v },
+        syn_accumulator
+            .query_by_hash(syn_accumulator.get_hash_by_position(1).unwrap().unwrap())
+            .unwrap()
+            .unwrap()
+            .child_hashes
+    );
+    assert_eq!(
+        { let mut v = [f, h, i, k].to_vec(); v.sort(); v },
+        syn_accumulator
+            .query_by_hash(syn_accumulator.get_hash_by_position(2).unwrap().unwrap())
+            .unwrap()
+            .unwrap()
+            .child_hashes
+    );
+    assert_eq!(
+        { let mut v = [j, m, k, l].to_vec(); v.sort(); v },
+        syn_accumulator
+            .query_by_hash(syn_accumulator.get_hash_by_position(3).unwrap().unwrap())
+            .unwrap()
+            .unwrap()
+            .child_hashes
+    );
+    assert_eq!(
+        { let mut v = [j, m, l].to_vec(); v.sort(); v },
+        syn_accumulator
+            .query_by_hash(syn_accumulator.get_hash_by_position(4).unwrap().unwrap())
+            .unwrap()
+            .unwrap()
+            .child_hashes
+    );
+}
+
+#[test]
+fn test_syn_dag_accumulator_fork() {
+    let mut syn_accumulator = SyncFlexiDagManagerImp::new();
+    let syn_accumulator_target = SyncFlexiDagManagerImp::new();
+
+    let genesis = HashValue::sha3_256_of(b"genesis");
+    let b = HashValue::sha3_256_of(b"b");
+    let c = HashValue::sha3_256_of(b"c");
+    let d =
HashValue::sha3_256_of(b"d"); + let e = HashValue::sha3_256_of(b"e"); + let f = HashValue::sha3_256_of(b"f"); + let h = HashValue::sha3_256_of(b"h"); + let i = HashValue::sha3_256_of(b"i"); + let j = HashValue::sha3_256_of(b"j"); + let k = HashValue::sha3_256_of(b"k"); + let l = HashValue::sha3_256_of(b"l"); + let m = HashValue::sha3_256_of(b"m"); + let p = HashValue::sha3_256_of(b"p"); + let v = HashValue::sha3_256_of(b"v"); + + let _genesis_key = syn_accumulator.insert_hashes([genesis].to_vec()).unwrap(); + let _genesis_key = syn_accumulator_target + .insert_hashes([genesis].to_vec()) + .unwrap(); + + let layer1 = syn_accumulator + .insert_hashes([b, c, d, e].to_vec()) + .unwrap(); + let layer2 = syn_accumulator + .insert_hashes([f, h, i, k].to_vec()) + .unwrap(); + let layer3 = syn_accumulator + .insert_hashes([j, m, k, l].to_vec()) + .unwrap(); + let layer4 = syn_accumulator.insert_hashes([j, m, l].to_vec()).unwrap(); + + let target_layer1 = syn_accumulator_target + .insert_hashes([b, c, d, e].to_vec()) + .unwrap(); + let target_layer2 = syn_accumulator_target + .insert_hashes([f, h, i, k].to_vec()) + .unwrap(); + let target_layer3 = syn_accumulator_target + .insert_hashes([j, m, k, l].to_vec()) + .unwrap(); + let target_layer4 = syn_accumulator_target + .insert_hashes([p, m, v].to_vec()) + .unwrap(); + let target_layer5 = syn_accumulator_target + .insert_hashes([p, v].to_vec()) + .unwrap(); + + assert_eq!(layer1, target_layer1); + assert_eq!(layer2, target_layer2); + assert_eq!(layer3, target_layer3); + + assert_ne!(layer4, target_layer4); + assert_ne!( + syn_accumulator.get_accumulator_info().get_num_leaves(), + syn_accumulator_target + .get_accumulator_info() + .get_num_leaves() + ); + assert_ne!( + syn_accumulator.get_accumulator_info(), + syn_accumulator_target.get_accumulator_info() + ); + + let info = syn_accumulator_target + .query_by_hash(layer3) + .unwrap() + .unwrap() + .accumulator_info; + + println!("{:?}", info); + assert_eq!( + layer3, + syn_accumulator.get_hash_by_position(3).unwrap().unwrap() + ); + + syn_accumulator.fork(info).unwrap(); + + assert_eq!( + layer3, + syn_accumulator.get_hash_by_position(3).unwrap().unwrap() + ); + + let new_layer4 = syn_accumulator.insert_hashes([p, m, v].to_vec()).unwrap(); + let new_layer5 = syn_accumulator.insert_hashes([p, v].to_vec()).unwrap(); + + assert_eq!(new_layer4, target_layer4); + assert_eq!(new_layer5, target_layer5); + assert_eq!( + syn_accumulator.get_accumulator_info().get_num_leaves(), + syn_accumulator_target + .get_accumulator_info() + .get_num_leaves() + ); + assert_eq!( + syn_accumulator.get_accumulator_info(), + syn_accumulator_target.get_accumulator_info() + ); +} + +#[test] +fn test_accumulator_temp() { + let flexi_dag_storage = Storage::new(StorageInstance::new_cache_and_db_instance( + CacheStorage::default(), + DBStorage::new( + starcoin_config::temp_dir().as_ref(), + RocksdbConfig::default(), + None, + ) + .unwrap(), + )) + .unwrap(); + let mut accumulator = MerkleAccumulator::new_empty( + flexi_dag_storage + .get_accumulator_store(starcoin_accumulator::node::AccumulatorStoreType::SyncDag), + ); + let _hash1 = accumulator.append(&[HashValue::sha3_256_of(b"a")]).unwrap(); + let _hash2 = accumulator.append(&[HashValue::sha3_256_of(b"b")]).unwrap(); + let _hash3 = accumulator.append(&[HashValue::sha3_256_of(b"c")]).unwrap(); + let accumulator_info = accumulator.get_info(); + let _hash4 = accumulator.append(&[HashValue::sha3_256_of(b"d")]).unwrap(); + + assert_eq!( + HashValue::sha3_256_of(b"b"), + 
accumulator.get_leaf(1).unwrap().unwrap()
+    );
+    accumulator.flush().unwrap();
+    accumulator = accumulator.fork(Some(accumulator_info));
+    let _hash5 = accumulator.append(&[HashValue::sha3_256_of(b"e")]).unwrap();
+
+    assert_eq!(
+        HashValue::sha3_256_of(b"b"),
+        accumulator.get_leaf(1).unwrap().unwrap()
+    );
+    assert_eq!(
+        HashValue::sha3_256_of(b"c"),
+        accumulator.get_leaf(2).unwrap().unwrap()
+    );
+    assert_eq!(
+        HashValue::sha3_256_of(b"e"),
+        accumulator.get_leaf(3).unwrap().unwrap()
+    );
+    assert_ne!(
+        HashValue::sha3_256_of(b"d"),
+        accumulator.get_leaf(3).unwrap().unwrap()
+    );
+}

diff --git a/storage/src/tests/test_storage.rs b/storage/src/tests/test_storage.rs index be7a2eaa44..1c098eeba8 100644 --- a/storage/src/tests/test_storage.rs +++ b/storage/src/tests/test_storage.rs @@ -18,15 +18,18 @@
 use anyhow::Result;
 use starcoin_accumulator::accumulator_info::AccumulatorInfo;
 use starcoin_config::RocksdbConfig;
 use starcoin_crypto::HashValue;
-use starcoin_types::{
-    account_address::AccountAddress,
-    block::{Block, BlockBody, BlockHeader, BlockInfo},
-    language_storage::TypeTag,
-    startup_info::SnapshotRange,
-    transaction::{RichTransactionInfo, SignedUserTransaction, Transaction, TransactionInfo},
-    vm_error::KeptVMStatus,
+use starcoin_types::block::{Block, BlockBody, BlockHeader, BlockInfo};
+//use starcoin_types::language_storage::TypeTag;
+use starcoin_types::startup_info::SnapshotRange;
+use starcoin_types::transaction::{
+    RichTransactionInfo, SignedUserTransaction, Transaction, TransactionInfo,
 };
+use starcoin_types::vm_error::KeptVMStatus;
+use starcoin_vm_types::account_address::AccountAddress;
+use starcoin_vm_types::language_storage::TypeTag;
 use starcoin_vm_types::state_store::table::{TableHandle, TableInfo};
+//use starcoin_vm_types::account_address::AccountAddress;
+//use starcoin_vm_types::state_store::table::{TableHandle, TableInfo};
 use std::path::Path;
 #[test]

diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index b8fcd18b43..b29c1d2545 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -29,6 +29,12 @@
 pub static BARNARD_HARD_FORK_HASH: Lazy<HashValue> = Lazy::new(|| { ) .expect("") });
+pub static DEV_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
+pub static TEST_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
+pub static PROXIMA_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
+pub static HALLEY_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
+pub static BARNARD_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
+pub static MAIN_FLEXIDAG_FORK_HEIGHT: BlockNumber = 4;
 impl DBUpgrade {
 pub fn check_upgrade(instance: &mut StorageInstance) -> Result<()> { @@ -163,6 +169,11 @@ impl DBUpgrade {
 Ok(()) }
+    fn db_upgrade_v3_v4(_instance: &mut StorageInstance) -> Result<()> {
+        // https://github.com/facebook/rocksdb/issues/1295
+        Ok(())
+    }
+
 pub fn do_upgrade(
 version_in_db: StorageVersion,
 version_in_code: StorageVersion,
@@ -185,6 +196,12 @@
 (StorageVersion::V2, StorageVersion::V3) => { Self::db_upgrade_v2_v3(instance)?; }
+        (StorageVersion::V3, StorageVersion::V4)
+        | (StorageVersion::V1, StorageVersion::V4)
+        | (StorageVersion::V2, StorageVersion::V4) => {
+            // just for testing; TODO: implement the real v3 -> v4 migration
+            Self::db_upgrade_v3_v4(instance)?;
+        }
 _ => bail!( "Can not upgrade db from {:?} to {:?}", version_in_db,

diff --git a/sync/Cargo.toml b/sync/Cargo.toml index fdff574ab8..7210d528ca 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -42,6 +42,9 @@
 stest = { workspace = true }
 stream-task = { workspace = true }
 sysinfo = { workspace = true }
 thiserror = { workspace = true }
+starcoin-consensus = { workspace = true }
+timeout-join-handler = { workspace = true }
+starcoin-flexidag = { workspace = true }
 [dev-dependencies]
 hex = { workspace = true }
@@ -57,6 +60,7 @@
 starcoin-txpool-mock-service = { workspace = true }
 starcoin-executor = { workspace = true }
 test-helper = { workspace = true }
 tokio = { features = ["full"], workspace = true }
+starcoin-genesis = { workspace = true }
 [package]
 authors = { workspace = true }

diff --git a/sync/api/src/lib.rs b/sync/api/src/lib.rs index 284bbce588..60f4c869b2 100644 --- a/sync/api/src/lib.rs +++ b/sync/api/src/lib.rs @@ -1,6 +1,9 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0
+use std::any;
+use std::sync::Arc;
+
 use anyhow::Result;
 use network_api::PeerId;
 use network_api::PeerStrategy;
@@ -9,6 +12,7 @@
 use serde::{Deserialize, Serialize};
 pub use service::{SyncAsyncService, SyncServiceHandler};
 use starcoin_crypto::HashValue;
 use starcoin_service_registry::ServiceRequest;
+use starcoin_types::block::ExecutedBlock;
 use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber};
 use starcoin_types::sync_status::SyncStatus;
 use starcoin_types::U256;
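Annotation: with the storage bump above, the current version must be V4 and its column-family list must include both new flexi-dag columns — the invariant the (for now empty) v3→v4 migration relies on. A quick check under that assumption:

```rust
// Illustrative only; types and constants are from the storage hunks above.
use starcoin_storage::{
    StorageVersion, SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME, SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME,
};

fn main() {
    let v = StorageVersion::current_version();
    assert_eq!(v, StorageVersion::V4);
    let cfs = v.get_column_family_names();
    // Both dag column families must be registered before the db is reopened.
    assert!(cfs.contains(&SYNC_FLEXI_DAG_ACCUMULATOR_PREFIX_NAME));
    assert!(cfs.contains(&SYNC_FLEXI_DAG_SNAPSHOT_PREFIX_NAME));
}
```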
diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index d35d9e4757..7890f58fd9 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -1,40 +1,59 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0
+#[cfg(test)]
+use super::CheckBlockConnectorHashValue;
+use crate::block_connector::write_block_chain::ConnectOk;
 use crate::block_connector::{ExecuteRequest, ResetRequest, WriteBlockChainService};
 use crate::sync::{CheckSyncEvent, SyncService};
-use crate::tasks::{BlockConnectedEvent, BlockDiskCheckEvent};
-use anyhow::{format_err, Result};
+use crate::tasks::{BlockConnectedEvent, BlockConnectedFinishEvent, BlockDiskCheckEvent};
+#[cfg(test)]
+use anyhow::bail;
+use anyhow::{format_err, Ok, Result};
 use network_api::PeerProvider;
-use starcoin_chain_api::{ConnectBlockError, WriteableChainService};
+use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService};
 use starcoin_config::{NodeConfig, G_CRATE_VERSION};
+use starcoin_consensus::dag::blockdag::InitDagState;
+use starcoin_consensus::BlockDAG;
+use starcoin_crypto::HashValue;
 use starcoin_executor::VMMetrics;
+use starcoin_flexidag::FlexidagService;
 use starcoin_logger::prelude::*;
 use starcoin_network::NetworkServiceRef;
 use starcoin_service_registry::{ ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, };
-use starcoin_storage::{BlockStore, Storage};
+use starcoin_storage::{flexi_dag, BlockStore, Storage};
 use starcoin_sync_api::PeerNewBlock;
 use starcoin_txpool::TxPoolService;
+use starcoin_txpool_api::TxPoolSyncService;
+#[cfg(test)]
+use starcoin_txpool_mock_service::MockTxPoolService;
 use starcoin_types::block::ExecutedBlock;
 use starcoin_types::sync_status::SyncStatus;
 use starcoin_types::system_events::{MinedBlock, SyncStatusChangeEvent, SystemShutdown};
-use std::sync::Arc;
+use std::result;
+use std::sync::{Arc, Mutex};
 use sysinfo::{DiskExt, System, SystemExt};
 const DISK_CHECKPOINT_FOR_PANIC: u64 = 1024 * 1024 * 1024 * 3;
 const DISK_CHECKPOINT_FOR_WARN: u64 = 1024 * 1024 * 1024 * 5;
-pub struct BlockConnectorService {
-    chain_service: WriteBlockChainService,
+pub struct BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
+    chain_service: WriteBlockChainService<TransactionPoolServiceT>,
     sync_status: Option<SyncStatus>,
     config: Arc<NodeConfig>,
 }
-impl BlockConnectorService {
+impl<TransactionPoolServiceT> BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     pub fn new(
-        chain_service: WriteBlockChainService,
+        chain_service: WriteBlockChainService<TransactionPoolServiceT>,
         config: Arc<NodeConfig>,
     ) -> Self {
         Self {
@@ -51,6 +70,10 @@ }
+    pub fn chain_head_id(&self) -> HashValue {
+        self.chain_service.get_main().status().head.id()
+    }
+
     pub fn check_disk_space(&mut self) -> Option<Result<u64>> {
         if System::IS_SUPPORTED {
             let mut sys = System::new_all();
@@ -97,11 +120,17 @@ } }
-impl ServiceFactory<Self> for BlockConnectorService {
-    fn create(ctx: &mut ServiceContext<Self>) -> Result<Self> {
+impl<TransactionPoolServiceT> ServiceFactory<Self>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
+    fn create(
+        ctx: &mut ServiceContext<BlockConnectorService<TransactionPoolServiceT>>,
+    ) -> Result<BlockConnectorService<TransactionPoolServiceT>> {
         let config = ctx.get_shared::<Arc<NodeConfig>>()?;
         let bus = ctx.bus_ref().clone();
-        let txpool = ctx.get_shared::<TxPoolService>()?;
+        let txpool = ctx.get_shared::<TransactionPoolServiceT>()?;
         let storage = ctx.get_shared::<Arc<Storage>>()?;
         let startup_info = storage .get_startup_info()?
@@ -114,13 +143,17 @@
             txpool,
             bus,
             vm_metrics,
+            ctx.service_ref::<FlexidagService>()?.clone(),
         )?;
         Ok(Self::new(chain_service, config))
     }
 }
-impl ActorService for BlockConnectorService {
+impl<TransactionPoolServiceT> ActorService for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn started(&mut self, ctx: &mut ServiceContext<Self>) -> Result<()> {
         //TODO figure out a more suitable value.
         ctx.set_mailbox_capacity(1024);
@@ -141,15 +174,19 @@ } }
-impl EventHandler<Self, BlockDiskCheckEvent> for BlockConnectorService {
+impl<TransactionPoolServiceT> EventHandler<Self, BlockDiskCheckEvent>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn handle_event(
         &mut self,
         _: BlockDiskCheckEvent,
-        ctx: &mut ServiceContext<Self>,
+        ctx: &mut ServiceContext<BlockConnectorService<TransactionPoolServiceT>>,
     ) {
         if let Some(res) = self.check_disk_space() {
             match res {
-                Ok(available_space) => {
+                std::result::Result::Ok(available_space) => {
                     warn!("Available diskspace only {}/GB left ", available_space)
                 }
                 Err(e) => {
@@ -161,30 +198,83 @@ } } }
-impl EventHandler<Self, BlockConnectedEvent> for BlockConnectorService {
+impl EventHandler<Self, BlockConnectedEvent> for BlockConnectorService<TxPoolService> {
+    fn handle_event(
+        &mut self,
+        msg: BlockConnectedEvent,
+        ctx: &mut ServiceContext<BlockConnectorService<TxPoolService>>,
+    ) {
+        // this block has already been executed by the sync task, so just try to connect it to the selected head chain.
+        //TODO refactor connect and execute
+
+        let block = msg.block;
+        let feedback = msg.feedback;
+
+        match msg.action {
+            crate::tasks::BlockConnectAction::ConnectNewBlock => {
+                if let Err(e) = self.chain_service.try_connect(block) {
+                    error!("Process connected new block from sync error: {:?}", e);
+                }
+            }
+            crate::tasks::BlockConnectAction::ConnectExecutedBlock => {
+                if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) {
+                    error!("Process connected executed block from sync error: {:?}", e);
+                }
+            }
+        }
+
+        feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent));
+    }
+}
+
+#[cfg(test)]
+impl EventHandler<Self, BlockConnectedEvent> for BlockConnectorService<MockTxPoolService> {
     fn handle_event(
         &mut self,
         msg: BlockConnectedEvent,
-        _ctx: &mut ServiceContext<Self>,
+        ctx: &mut ServiceContext<BlockConnectorService<MockTxPoolService>>,
     ) {
         // this block has already been executed by the sync task, so just try to connect it to the selected head chain.
         //TODO refactor connect and execute
         let block = msg.block;
-        if let Err(e) = self.chain_service.try_connect(block) {
-            error!("Process connected block error: {:?}", e);
+        let feedback = msg.feedback;
+
+        match msg.action {
+            crate::tasks::BlockConnectAction::ConnectNewBlock => {
+                if let Err(e) = self.chain_service.apply_failed(block) {
+                    error!("Process connected new block from sync error: {:?}", e);
+                }
+            }
+            crate::tasks::BlockConnectAction::ConnectExecutedBlock => {
+                if let Err(e) = self.chain_service.switch_new_main(block.header().id(), ctx) {
+                    error!("Process connected executed block from sync error: {:?}", e);
+                }
+            }
         }
+
+        feedback.map(|f| f.unbounded_send(BlockConnectedFinishEvent));
     }
 }
-impl EventHandler<Self, MinedBlock> for BlockConnectorService {
-    fn handle_event(&mut self, msg: MinedBlock, _ctx: &mut ServiceContext<Self>) {
-        let MinedBlock(new_block) = msg;
+impl<TransactionPoolServiceT> EventHandler<Self, MinedBlock>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
+    fn handle_event(&mut self, msg: MinedBlock, ctx: &mut ServiceContext<Self>) {
+        let MinedBlock(new_block, tips_headers) = msg;
+        let block_header = new_block.header().clone();
         let id = new_block.header().id();
         debug!("try connect mined block: {}", id);
-        match self.chain_service.try_connect(new_block.as_ref().clone()) {
-            Ok(_) => debug!("Process mined block {} success.", id),
+        match self.chain_service.try_connect(new_block.as_ref().clone()) {
+            std::result::Result::Ok(ConnectOk::DagConnected) => {
+                match self.chain_service.dump_tips(block_header) {
+                    std::result::Result::Ok(_) => (),
+                    Err(e) => error!("failed to dump tips to dag accumulator: {}", e),
+                }
+            }
+            std::result::Result::Ok(_) => debug!("Process mined block {} success.", id),
            Err(e) => {
                warn!("Process mined block {} fail, error: {:?}", id, e);
            }
@@ -192,13 +282,21 @@ } }
-impl EventHandler<Self, SyncStatusChangeEvent> for BlockConnectorService {
+impl<TransactionPoolServiceT> EventHandler<Self, SyncStatusChangeEvent>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn handle_event(&mut self, msg: SyncStatusChangeEvent, _ctx: &mut ServiceContext<Self>) {
         self.sync_status = Some(msg.0);
     }
 }
-impl EventHandler<Self, PeerNewBlock> for BlockConnectorService {
+impl<TransactionPoolServiceT> EventHandler<Self, PeerNewBlock>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn handle_event(&mut self, msg: PeerNewBlock, ctx: &mut ServiceContext<Self>) {
         if !self.is_synced() {
             debug!("[connector] Ignore PeerNewBlock event because the node has not been synchronized yet.");
@@ -207,11 +305,15 @@
         let peer_id = msg.get_peer_id();
         if let Err(e) = self.chain_service.try_connect(msg.get_block().clone()) {
             match e.downcast::<ConnectBlockError>() {
-                Ok(connect_error) => {
+                std::result::Result::Ok(connect_error) => {
                    match connect_error {
                        ConnectBlockError::FutureBlock(block) => {
+                            // update_tips returns a Result; handle_event returns (), so log instead of `?`
+                            if let Err(e) = self
+                                .chain_service
+                                .update_tips(msg.get_block().header().clone())
+                            {
+                                error!("failed to update tips: {:?}", e);
+                            }
                            //TODO cache future block
-                            if let Ok(sync_service) = ctx.service_ref::<SyncService>() {
+                            if let std::result::Result::Ok(sync_service) =
+                                ctx.service_ref::<SyncService>()
+                            {
                                info!( "BlockConnector try connect future block ({:?},{}), peer_id:{:?}, notify Sync service check sync.", block.id(),
@@ -257,22 +359,52 @@ } }
-impl ServiceHandler<Self, ResetRequest> for BlockConnectorService {
+impl<TransactionPoolServiceT> ServiceHandler<Self, ResetRequest>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn handle(
         &mut self,
         msg: ResetRequest,
-        _ctx: &mut ServiceContext<Self>,
+        _ctx: &mut ServiceContext<BlockConnectorService<TransactionPoolServiceT>>,
     ) -> Result<()> {
-        self.chain_service.reset(msg.block_hash)
+        self.chain_service
+            .reset(msg.block_hash, msg.dag_block_parent)
     }
 }
-impl ServiceHandler<Self, ExecuteRequest> for BlockConnectorService {
+impl<TransactionPoolServiceT> ServiceHandler<Self, ExecuteRequest>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
     fn handle(
         &mut self,
         msg: ExecuteRequest,
-        _ctx: &mut ServiceContext<Self>,
+        _ctx: &mut ServiceContext<BlockConnectorService<TransactionPoolServiceT>>,
     ) -> Result<ExecutedBlock> {
-        self.chain_service.execute(msg.block)
+        self.chain_service.execute(msg.block, msg.block_parent)
+    }
+}
+
+#[cfg(test)]
+impl<TransactionPoolServiceT> ServiceHandler<Self, CheckBlockConnectorHashValue>
+    for BlockConnectorService<TransactionPoolServiceT>
+where
+    TransactionPoolServiceT: TxPoolSyncService + 'static,
+{
+    fn handle(
+        &mut self,
+        msg: CheckBlockConnectorHashValue,
+        _ctx: &mut ServiceContext<BlockConnectorService<TransactionPoolServiceT>>,
+    ) -> Result<()> {
+        if self.chain_service.get_main().status().head().id() == msg.head_hash {
+            info!("the branch in chain service is the same as target's branch");
+            Ok(())
+        } else {
+            info!("mock branch in chain service is not the same as target's branch");
+            bail!("blockchain in chain service is not the same as target!");
+        }
    }
 }

diff --git a/sync/src/block_connector/mod.rs b/sync/src/block_connector/mod.rs index 05b7cfd2b2..a567f76c35 100644 --- a/sync/src/block_connector/mod.rs +++ b/sync/src/block_connector/mod.rs @@ -11,6 +11,8 @@
 mod metrics;
 mod test_illegal_block;
 #[cfg(test)]
 mod test_write_block_chain;
+#[cfg(test)]
+mod test_write_dag_block_chain;
 mod write_block_chain;
 pub use block_connector_service::BlockConnectorService;
@@ -26,6 +28,7 @@
 pub use test_write_block_chain::new_block;
 #[derive(Debug, Clone)]
 pub struct ResetRequest {
     pub block_hash: HashValue,
+    pub dag_block_parent: Option<Vec<HashValue>>,
 }
 impl ServiceRequest for ResetRequest { @@ -40,3 +43,14 @@ pub struct ExecuteRequest {
 impl ServiceRequest for ExecuteRequest {
     type Response = anyhow::Result<ExecutedBlock>;
 }
+
+#[cfg(test)]
+#[derive(Debug, Clone)]
+pub struct CheckBlockConnectorHashValue {
+    pub head_hash: HashValue,
+}
+
+#[cfg(test)]
+impl ServiceRequest for CheckBlockConnectorHashValue {
+    type Response = anyhow::Result<()>;
+}
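Annotation: making the connector generic over `TxPoolSyncService` lets tests run against the mock pool while production keeps the real one, without duplicating the service logic. A type-level illustration only (the aliases are invented, and the import path is assumed from this repo's layout):

```rust
// Hypothetical aliases, not part of the patch.
use starcoin_sync::block_connector::BlockConnectorService; // path assumed
use starcoin_txpool::TxPoolService;
use starcoin_txpool_mock_service::MockTxPoolService;

// Production wiring registers the real txpool...
type ProductionConnector = BlockConnectorService<TxPoolService>;
// ...while tests swap in the mock without touching the handlers.
type TestConnector = BlockConnectorService<MockTxPoolService>;
```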
diff --git a/sync/src/block_connector/test_illegal_block.rs b/sync/src/block_connector/test_illegal_block.rs index ec2b662895..84f642c0ef 100644 --- a/sync/src/block_connector/test_illegal_block.rs +++ b/sync/src/block_connector/test_illegal_block.rs @@ -50,7 +50,8 @@ async fn new_block_and_main() -> (Block, BlockChain) {
 .get_main() .current_header() .id();
- let main = BlockChain::new(net.time_service(), head_id, storage, None).unwrap();
+ let main =
+     BlockChain::new(net.time_service(), head_id, storage, net.id().clone(), None).unwrap();
 let new_block = new_block( None, &mut writeable_block_chain_service,
@@ -87,7 +88,14 @@ async fn uncle_block_and_writeable_block_chain( .unwrap() .id();
- let new_branch = BlockChain::new(net.time_service(), tmp_head, storage.clone(), None).unwrap();
+ let new_branch = BlockChain::new(
+     net.time_service(),
+     tmp_head,
+     storage.clone(),
+     net.id().clone(),
+     None,
+ )
+ .unwrap();
 let (block_template, _) = new_branch .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) .unwrap();
@@ -122,8 +130,8 @@ fn apply_with_illegal_uncle( .get_main() .current_header() .id();
- let mut main = BlockChain::new(net.time_service(), head_id, storage, None)?;
- main.apply(new_block.clone())?;
+ let mut main = BlockChain::new(net.time_service(), head_id, storage, net.id().clone(), None)?;
+ main.apply(new_block.clone(), None)?;
 Ok(new_block) }
@@ -160,7 +168,7 @@ async fn test_verify_gas_limit(succ: bool) -> Result<()> { .with_gas_used(u64::MAX) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -183,7 +191,7 @@ async fn test_verify_body_hash(succ: bool) -> Result<()> { .with_body_hash(HashValue::random()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -206,7 +214,7 @@ async fn test_verify_parent_id(succ: bool) -> Result<()> { .with_parent_hash(HashValue::random()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -234,7 +242,7 @@ async fn test_verify_timestamp(succ: bool) -> Result<()> { .with_timestamp(main.current_header().timestamp()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -263,7 +271,7 @@ async fn test_verify_future_timestamp(succ: bool) -> Result<()> { ) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -360,8 +368,14 @@ async fn test_verify_can_not_be_uncle_check_ancestor_failed() { .unwrap() .unwrap() .id();
- let mut new_branch =
-     BlockChain::new(net.time_service(), tmp_head, storage.clone(), None).unwrap();
+ let mut new_branch = BlockChain::new(
+     net.time_service(),
+     tmp_head,
+     storage.clone(),
+     net.id().clone(),
+     None,
+ )
+ .unwrap();
 for _i in 0..2 {
 let (block_template, _) = new_branch @@ -371,7 +385,7 @@ .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) .unwrap();
 let new_block = new_branch .consensus() .create_block(block_template, net.time_service().as_ref()) .unwrap();
- new_branch.apply(new_block).unwrap();
+ new_branch.apply(new_block, None).unwrap();
 }
 // 3.
new block @@ -468,7 +482,7 @@ async fn test_verify_illegal_uncle_consensus(succ: bool) -> Result<()> {
 .create_block(block_template, net.time_service().as_ref()) .unwrap();
- main_block_chain.apply(new_block)?;
+ main_block_chain.apply(new_block, None)?;
 Ok(()) }
@@ -491,7 +505,7 @@ async fn test_verify_state_root(succ: bool) -> Result<()> { .with_state_root(HashValue::random()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -510,7 +524,7 @@ async fn test_verify_block_used_gas(succ: bool) -> Result<()> { if !succ { new_block.header = new_block.header().as_builder().with_gas_used(1).build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -532,7 +546,7 @@ async fn test_verify_txn_count_failed() {
 let mut body = new_block.body.clone(); body.transactions = txns; new_block.body = body;
- let apply_failed = main.apply(new_block);
+ let apply_failed = main.apply(new_block, None);
 assert!(apply_failed.is_err());
 if let Err(apply_err) = apply_failed { error!("apply failed : {:?}", apply_err);
@@ -548,7 +562,7 @@ async fn test_verify_accumulator_root(succ: bool) -> Result<()> { .with_accumulator_root(HashValue::random()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -571,7 +585,7 @@ async fn test_verify_block_accumulator_root(succ: bool) -> Result<()> { .with_parent_block_accumulator_root(HashValue::random()) .build(); }
- main.apply(new_block)?;
+ main.apply(new_block, None)?;
 Ok(()) }
@@ -602,7 +616,7 @@ async fn test_verify_block_number_failed(succ: bool, order: bool) { .build(); } }
- let apply_failed = main.apply(new_block);
+ let apply_failed = main.apply(new_block, None);
 if !succ { assert!(apply_failed.is_err()); if let Err(apply_err) = apply_failed {

diff --git a/sync/src/block_connector/test_write_block_chain.rs b/sync/src/block_connector/test_write_block_chain.rs index c94ebe91b9..73b78a3dfa 100644 --- a/sync/src/block_connector/test_write_block_chain.rs +++ b/sync/src/block_connector/test_write_block_chain.rs @@ -7,6 +7,8 @@
 use starcoin_chain::{BlockChain, ChainReader};
 use starcoin_chain_service::WriteableChainService;
 use starcoin_config::NodeConfig;
-use starcoin_consensus::Consensus;
+use starcoin_consensus::{BlockDAG, Consensus, FlexiDagStorage, FlexiDagStorageConfig};
+use starcoin_crypto::HashValue;
 use starcoin_genesis::Genesis as StarcoinGenesis;
 use starcoin_service_registry::bus::BusService;
 use starcoin_service_registry::{RegistryAsyncService, RegistryService};
@@ -14,8 +16,10 @@
 use starcoin_storage::Store;
 use starcoin_time_service::TimeService;
 use starcoin_txpool_mock_service::MockTxPoolService;
 use starcoin_types::block::Block;
+use starcoin_types::blockhash::ORIGIN;
+use starcoin_types::header::Header;
 use starcoin_types::startup_info::StartupInfo;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 pub async fn create_writeable_block_chain() -> ( WriteBlockChainService<MockTxPoolService>, @@ -25,11 +29,34 @@
 let node_config = NodeConfig::random_for_test();
 let node_config = Arc::new(node_config);
- let (storage, chain_info, _) = StarcoinGenesis::init_storage_for_test(node_config.net())
+ let (storage, chain_info, genesis) = StarcoinGenesis::init_storage_for_test(node_config.net())
 .expect("init storage by genesis fail.");
 let registry = RegistryService::launch();
 let bus = registry.service_ref::<BusService>().await.unwrap();
 let txpool_service = MockTxPoolService::new();
+
+    genesis.save(node_config.data_dir()).unwrap();
+
+    let
(chain_info, genesis) = StarcoinGenesis::init_and_check_storage( + node_config.net(), + storage.clone(), + node_config.data_dir(), + ) + .expect("init chain and genesis error"); + + let flex_dag_config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); + let flex_dag_db = FlexiDagStorage::create_from_path("./smolstc", flex_dag_config) + .expect("Failed to create flexidag storage"); + + let dag = BlockDAG::new( + Header::new( + genesis.block().header().clone(), + vec![HashValue::new(ORIGIN)], + ), + 3, + flex_dag_db, + ); + ( WriteBlockChainService::new( node_config.clone(), @@ -38,6 +65,7 @@ pub async fn create_writeable_block_chain() -> ( txpool_service, bus, None, + Some(Arc::new(Mutex::new(dag))), ) .unwrap(), node_config, @@ -58,7 +86,8 @@ pub fn gen_blocks( writeable_block_chain_service, time_service, ); - writeable_block_chain_service.try_connect(block).unwrap(); + let e = writeable_block_chain_service.try_connect(block); + println!("try_connect result: {:?}", e) } } } @@ -79,7 +108,7 @@ pub fn new_block( .unwrap(); block_chain .consensus() - .create_block(block_template, time_service) + .create_single_chain_block(block_template, time_service) .unwrap() } @@ -100,6 +129,7 @@ async fn test_block_chain_apply() { .number(), times ); + println!("finish test_block_chain_apply"); } fn gen_fork_block_chain( @@ -121,6 +151,7 @@ fn gen_fork_block_chain( net.time_service(), parent_id, writeable_block_chain_service.get_main().get_storage(), + net.id().clone(), None, ) .unwrap(); @@ -129,7 +160,7 @@ fn gen_fork_block_chain( .unwrap(); let block = block_chain .consensus() - .create_block(block_template, net.time_service().as_ref()) + .create_single_chain_block(block_template, net.time_service().as_ref()) .unwrap(); parent_id = block.id(); @@ -227,7 +258,7 @@ async fn test_block_chain_reset() -> anyhow::Result<()> { .get_main() .get_block_by_number(3)? 
.unwrap(); - writeable_block_chain_service.reset(block.id())?; + writeable_block_chain_service.reset(block.id(), None)?; assert_eq!( writeable_block_chain_service .get_main() diff --git a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs new file mode 100644 index 0000000000..20d3479214 --- /dev/null +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -0,0 +1,212 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::integer_arithmetic)] +use crate::block_connector::test_write_block_chain::create_writeable_block_chain; +use crate::block_connector::WriteBlockChainService; +use starcoin_account_api::AccountInfo; +use starcoin_chain::{BlockChain, ChainReader}; +use starcoin_chain_service::WriteableChainService; +use starcoin_config::NodeConfig; +use starcoin_consensus::{BlockDAG, Consensus, FlexiDagStorage, FlexiDagStorageConfig}; +use starcoin_crypto::HashValue; +use starcoin_genesis::Genesis as StarcoinGenesis; +use starcoin_service_registry::bus::BusService; +use starcoin_service_registry::{RegistryAsyncService, RegistryService}; +use starcoin_storage::Store; +use starcoin_time_service::TimeService; +use starcoin_txpool_mock_service::MockTxPoolService; +use starcoin_types::block::Block; +use starcoin_types::blockhash::ORIGIN; +use starcoin_types::header::Header; +use starcoin_types::startup_info::StartupInfo; +use std::sync::{Arc, Mutex}; + +pub fn gen_dag_blocks( + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Option { + let miner_account = AccountInfo::random(); + let mut last_block_hash = None; + if times > 0 { + for i in 0..times { + let block = new_dag_block( + Some(&miner_account), + writeable_block_chain_service, + time_service, + ); + last_block_hash = Some(block.id()); + let e = writeable_block_chain_service.try_connect(block); + println!("try_connect result: {:?}", e); + assert!(e.is_ok()); + if (i + 1) % 3 == 0 { + writeable_block_chain_service.time_sleep(5); + } + } + } + + let result = writeable_block_chain_service.execute_dag_block_pool(); + let result = result.unwrap(); + match result { + super::write_block_chain::ConnectOk::Duplicate(block) + | super::write_block_chain::ConnectOk::ExeConnectMain(block) + | super::write_block_chain::ConnectOk::ExeConnectBranch(block) + | super::write_block_chain::ConnectOk::Connect(block) => Some(block.header().id()), + super::write_block_chain::ConnectOk::DagConnected + | super::write_block_chain::ConnectOk::MainDuplicate + | super::write_block_chain::ConnectOk::DagPending + | super::write_block_chain::ConnectOk::DagConnectMissingBlock => { + unreachable!("should not reach here, result: {:?}", result); + } + } +} + +pub fn new_dag_block( + miner_account: Option<&AccountInfo>, + writeable_block_chain_service: &mut WriteBlockChainService, + time_service: &dyn TimeService, +) -> Block { + let miner = match miner_account { + Some(m) => m.clone(), + None => AccountInfo::random(), + }; + let miner_address = *miner.address(); + let block_chain = writeable_block_chain_service.get_main(); + let (block_template, _) = block_chain + .create_block_template(miner_address, None, Vec::new(), vec![], None) + .unwrap(); + block_chain + .consensus() + .create_block(block_template, time_service) + .unwrap() +} + +#[stest::test] +async fn test_dag_block_chain_apply() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = 
create_writeable_block_chain().await; + let net = node_config.net(); + let last_header_id = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_header_id.unwrap() + ); + println!("finish test_block_chain_apply"); +} + +fn gen_fork_dag_block_chain( + fork_number: u64, + node_config: Arc, + times: u64, + writeable_block_chain_service: &mut WriteBlockChainService, +) -> Option { + let miner_account = AccountInfo::random(); + if let Some(block_header) = writeable_block_chain_service + .get_main() + .get_header_by_number(fork_number) + .unwrap() + { + let mut parent_id = block_header.id(); + let net = node_config.net(); + for _i in 0..times { + let block_chain = BlockChain::new( + net.time_service(), + parent_id, + writeable_block_chain_service.get_main().get_storage(), + net.id().clone(), + None, + ) + .unwrap(); + let (block_template, _) = block_chain + .create_block_template(*miner_account.address(), None, Vec::new(), vec![], None) + .unwrap(); + let block = block_chain + .consensus() + .create_block(block_template, net.time_service().as_ref()) + .unwrap(); + parent_id = block.id(); + + writeable_block_chain_service.try_connect(block).unwrap(); + } + return Some(parent_id); + } + return None; +} + +#[stest::test(timeout = 120)] +async fn test_block_chain_switch_main() { + let times = 12; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let mut last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + + last_block = gen_fork_dag_block_chain( + 0, + node_config, + 2 * times, + &mut writeable_block_chain_service, + ); + + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); +} + +#[stest::test] +async fn test_block_chain_reset() -> anyhow::Result<()> { + let times = 10; + let (mut writeable_block_chain_service, node_config, _) = create_writeable_block_chain().await; + let net = node_config.net(); + let mut last_block = gen_dag_blocks( + times, + &mut writeable_block_chain_service, + net.time_service().as_ref(), + ); + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .id(), + last_block.unwrap() + ); + let block = writeable_block_chain_service + .get_main() + .get_block_by_number(3)? + .unwrap(); + writeable_block_chain_service.reset(block.id(), None)?; + assert_eq!( + writeable_block_chain_service + .get_main() + .current_header() + .number(), + 3 + ); + + assert!(writeable_block_chain_service + .get_main() + .get_block_by_number(2)? 
+ .is_some()); + Ok(()) +} diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index c22ff42408..0095cde74c 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -2,25 +2,43 @@ // SPDX-License-Identifier: Apache-2.0 use crate::block_connector::metrics::ChainMetrics; -use anyhow::{format_err, Result}; +use crate::tasks::BlockDiskCheckEvent; +use anyhow::{bail, format_err, Ok, Result}; +use async_std::stream::StreamExt; +use futures::executor::block_on; use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, WriteableChainService}; use starcoin_config::NodeConfig; +use starcoin_consensus::dag::blockdag::InitDagState; +use starcoin_consensus::dag::ghostdag; +use starcoin_consensus::dag::ghostdag::protocol::ColoringOutput; +use starcoin_consensus::dag::types::ghostdata::GhostdagData; +use starcoin_consensus::{BlockDAG, FlexiDagStorage, FlexiDagStorageConfig}; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; +use starcoin_flexidag::flexidag_service::{AddToDag, DumpTipsToAccumulator, UpdateDagTips}; +use starcoin_flexidag::FlexidagService; use starcoin_logger::prelude::*; use starcoin_service_registry::bus::{Bus, BusService}; -use starcoin_service_registry::ServiceRef; +use starcoin_service_registry::{ServiceContext, ServiceRef}; +use starcoin_storage::flexi_dag::KTotalDifficulty; +use starcoin_storage::storage::CodecKVStore; use starcoin_storage::Store; +use starcoin_time_service::{DagBlockTimeWindowService, TimeWindowResult}; use starcoin_txpool_api::TxPoolSyncService; use starcoin_types::block::BlockInfo; +use starcoin_types::blockhash::BlockHashMap; +use starcoin_types::dag_block::KTotalDifficulty; +use starcoin_types::header::DagHeader; use starcoin_types::{ block::{Block, BlockHeader, ExecutedBlock}, startup_info::StartupInfo, system_events::{NewBranch, NewHeadBlock}, }; use std::fmt::Formatter; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; + +use super::BlockConnectorService; const MAX_ROLL_BACK_BLOCK: usize = 10; @@ -36,26 +54,55 @@ where bus: ServiceRef, metrics: Option, vm_metrics: Option, + dag_block_pool: Arc)>>>, + flexidag_service: ServiceRef, } -#[derive(Copy, Clone, Debug)] +#[derive(Clone, Debug)] pub enum ConnectOk { - Duplicate, + Duplicate(ExecutedBlock), //Execute block and connect to main - ExeConnectMain, + ExeConnectMain(ExecutedBlock), //Execute block and connect to branch. - ExeConnectBranch, + ExeConnectBranch(ExecutedBlock), + //Block has executed, just connect. + Connect(ExecutedBlock), + //Block has executed, just connect. 
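For orientation, a minimal self-contained model of the ConnectOk rework this hunk makes, with ExecutedBlock stubbed by a placeholder (the real type lives in starcoin-chain-api): variants that executed a block now carry it so callers can rebroadcast it, and the block() accessor added just below yields None for the DAG bookkeeping variants.

// Sketch only: `ExecutedBlock` is a stand-in, not the real chain-api type.
#[derive(Clone, Debug)]
struct ExecutedBlock(u64);

#[derive(Clone, Debug)]
enum ConnectOk {
    Duplicate(ExecutedBlock),
    ExeConnectMain(ExecutedBlock),
    ExeConnectBranch(ExecutedBlock),
    Connect(ExecutedBlock),
    DagConnected,
    MainDuplicate,
    DagPending,
    DagConnectMissingBlock,
}

impl ConnectOk {
    // Payload-carrying variants expose their block; DAG variants have none.
    fn block(&self) -> Option<ExecutedBlock> {
        match self {
            ConnectOk::Duplicate(b)
            | ConnectOk::ExeConnectMain(b)
            | ConnectOk::ExeConnectBranch(b)
            | ConnectOk::Connect(b) => Some(b.clone()),
            _ => None,
        }
    }
}

fn main() {
    assert!(ConnectOk::Connect(ExecutedBlock(1)).block().is_some());
    assert!(ConnectOk::DagPending.block().is_none());
}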
- Connect, + DagConnected, + // the retry block + MainDuplicate, + // the dag block waiting for the time window end + DagPending, + DagConnectMissingBlock, +} + +impl ConnectOk { + pub fn block(&self) -> Option { + match self { + ConnectOk::Duplicate(block) => Some(block.clone()), + ConnectOk::ExeConnectMain(block) => Some(block.clone()), + ConnectOk::ExeConnectBranch(block) => Some(block.clone()), + ConnectOk::Connect(block) => Some(block.clone()), + ConnectOk::DagConnected + | ConnectOk::MainDuplicate + | ConnectOk::DagPending + | ConnectOk::DagConnectMissingBlock => None, + } + } } impl std::fmt::Display for ConnectOk { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let s = match self { - ConnectOk::Duplicate => "Duplicate", - ConnectOk::ExeConnectMain => "ExeConnectMain", - ConnectOk::ExeConnectBranch => "ExeConnectBranch", - ConnectOk::Connect => "Connect", + ConnectOk::Duplicate(_) => "Duplicate", + ConnectOk::ExeConnectMain(_) => "ExeConnectMain", + ConnectOk::ExeConnectBranch(_) => "ExeConnectBranch", + ConnectOk::Connect(_) => "Connect", + ConnectOk::DagConnected => "DagConnect", + ConnectOk::MainDuplicate => "MainDuplicate", + ConnectOk::DagPending => "DagPending", + ConnectOk::DagConnectMissingBlock => "DagConnectMissingBlock", }; write!(f, "{}", s) } @@ -71,11 +118,19 @@ where .as_ref() .map(|metrics| metrics.chain_block_connect_time.start_timer()); - let result = self.connect_inner(block); + let result = if block.header().parents_hash().is_some() { + assert!( + transaction_parent.is_some(), + "in dag branch, the transaction parent should not be none" + ); + self.connect_dag_inner(block) + } else { + self.connect_inner(block) + }; if let Some(metrics) = self.metrics.as_ref() { let result = match result.as_ref() { - Ok(connect) => format!("Ok_{}", connect), + std::result::Result::Ok(connect) => format!("Ok_{}", connect), Err(err) => { if let Some(connect_err) = err.downcast_ref::() { format!("Err_{}", connect_err.reason()) @@ -93,23 +148,25 @@ where } } -impl

<P> WriteBlockChainService<P>
+impl WriteBlockChainService where - P: TxPoolSyncService + 'static, + TransactionPoolServiceT: TxPoolSyncService + 'static, { pub fn new( config: Arc, startup_info: StartupInfo, storage: Arc, - txpool: P, + txpool: TransactionPoolServiceT, bus: ServiceRef, vm_metrics: Option, + flexidag_service: ServiceRef, ) -> Result { let net = config.net(); let main = BlockChain::new( net.time_service(), startup_info.main, storage.clone(), + config.net().id().clone(), vm_metrics.clone(), )?; let metrics = config @@ -126,12 +183,16 @@ where bus, metrics, vm_metrics, + dag_block_pool: Arc::new(Mutex::new(vec![])), + flexidag_service, }) } fn find_or_fork( &self, header: &BlockHeader, + dag_block_next_parent: Option, + dag_block_parents: Option>, ) -> Result<(Option, Option)> { let block_id = header.id(); let block_info = self.storage.get_block_info(block_id)?; @@ -144,15 +205,17 @@ where net.time_service(), block_id, self.storage.clone(), + net.id().clone(), self.vm_metrics.clone(), )?) } - } else if self.block_exist(header.parent_hash())? { + } else if self.block_exist(header.parent_hash())? || self.blocks_exist(dag_block_parents)? { let net = self.config.net(); Some(BlockChain::new( net.time_service(), - header.parent_hash(), + dag_block_next_parent.unwrap_or(header.parent_hash()), self.storage.clone(), + net.id().clone(), self.vm_metrics.clone(), )?) } else { @@ -165,10 +228,68 @@ where Ok(matches!(self.storage.get_block_info(block_id)?, Some(_))) } + fn blocks_exist(&self, block_id: Option>) -> Result { + if let Some(block_id) = block_id { + if block_id.is_empty() { + return Ok(false); + } + return Ok(matches!(self.storage.get_block_infos(block_id)?, _)); + } + return Ok(false); + } + pub fn get_main(&self) -> &BlockChain { &self.main } + #[cfg(test)] + pub fn time_sleep(&self, sec: u64) { + self.config.net().time_service().sleep(sec * 1000000); + } + + #[cfg(test)] + pub fn apply_failed(&mut self, block: Block) -> Result<()> { + use anyhow::bail; + use starcoin_chain::verifier::FullVerifier; + + // apply but no connection + let verified_block = self.main.verify_with_verifier::(block)?; + let _executed_block = self.main.execute(verified_block)?; + + bail!("failed to apply for tesing the connection later!"); + } + + // for sync task to connect to its chain, if chain's total difficulties is larger than the main + // switch by: + // 1, update the startup info + // 2, broadcast the new header + pub fn switch_new_main( + &mut self, + new_head_block: HashValue, + ctx: &mut ServiceContext>, + ) -> Result<()> + where + TransactionPoolServiceT: TxPoolSyncService, + { + let new_branch = BlockChain::new( + self.config.net().time_service(), + new_head_block, + self.storage.clone(), + self.config.net().id().clone(), + self.vm_metrics.clone(), + )?; + + let main_total_difficulty = self.main.get_total_difficulty()?; + let branch_total_difficulty = new_branch.get_total_difficulty()?; + if branch_total_difficulty > main_total_difficulty { + self.update_startup_info(new_branch.head_block().header())?; + ctx.broadcast(NewHeadBlock(Arc::new(new_branch.head_block()))); + Ok(()) + } else { + bail!("no need to switch"); + } + } + pub fn select_head(&mut self, new_branch: BlockChain) -> Result<()> { let executed_block = new_branch.head_block(); let main_total_difficulty = self.main.get_total_difficulty()?; @@ -206,8 +327,21 @@ where retracted_count: u64, retracted_blocks: Vec, ) -> Result<()> { + if enacted_blocks.is_empty() { + error!("enacted_blocks is empty."); + bail!("enacted_blocks is empty."); + } + if 
enacted_blocks.last().unwrap().header != executed_block.block().header { + error!("enacted_blocks.last().unwrap().header: {:?}, executed_block.block().header: {:?} are different!", + enacted_blocks.last().unwrap().header, executed_block.block().header); + bail!("enacted_blocks.last().unwrap().header: {:?}, executed_block.block().header: {:?} are different!", + enacted_blocks.last().unwrap().header, executed_block.block().header); + } debug_assert!(!enacted_blocks.is_empty()); - debug_assert_eq!(enacted_blocks.last().unwrap(), executed_block.block()); + debug_assert_eq!( + enacted_blocks.last().unwrap().header, + executed_block.block().header + ); self.update_startup_info(executed_block.block().header())?; if retracted_count > 0 { if let Some(metrics) = self.metrics.as_ref() { @@ -231,13 +365,19 @@ where .set(executed_block.block_info.txn_accumulator_info.num_leaves); } - self.broadcast_new_head(executed_block); + // self.broadcast_new_head(executed_block, dag_parents, next_tips); Ok(()) } + pub fn do_new_head_with_broadcast() {} + /// Reset the node to `block_id`, and replay blocks after the block - pub fn reset(&mut self, block_id: HashValue) -> Result<()> { + pub fn reset( + &mut self, + block_id: HashValue, + dag_block_parents: Option>, + ) -> Result<()> { let new_head_block = self .main .get_block(block_id)? @@ -246,6 +386,7 @@ where self.config.net().time_service(), block_id, self.storage.clone(), + self.config.net().id().clone(), self.vm_metrics.clone(), )?; @@ -268,21 +409,33 @@ where let (enacted_count, enacted_blocks, retracted_count, retracted_blocks) = (1, vec![executed_block.block.clone()], 0, vec![]); self.do_new_head( - executed_block, + executed_block.clone(), enacted_count, enacted_blocks, retracted_count, retracted_blocks, )?; + + let next_tips = self + .storage + .get_tips_by_block_id(executed_block.block.header().id()) + .ok(); + self.broadcast_new_head(executed_block); + Ok(()) } ///Directly execute the block and save result, do not try to connect. - pub fn execute(&mut self, block: Block) -> Result { + pub fn execute( + &mut self, + block: Block, + transaction_parent: Option, + ) -> Result { let chain = BlockChain::new( self.config.net().time_service(), block.header().parent_hash(), self.storage.clone(), + self.config.net().id().clone(), self.vm_metrics.clone(), )?; let verify_block = chain.verify(block)?; @@ -290,7 +443,10 @@ where } fn is_main_head(&self, parent_id: &HashValue) -> bool { - parent_id == &self.startup_info.main + if parent_id == &self.startup_info.main { + return true; + } + return false; } fn update_startup_info(&mut self, main_head: &BlockHeader) -> Result<()> { @@ -304,6 +460,105 @@ where } } + fn find_ancestors_from_dag_accumulator( + &self, + new_branch: &BlockChain, + ) -> Result<(u64, Vec, u64, Vec)> { + let mut min_leaf_index = std::cmp::min( + self.main.get_dag_current_leaf_number()?, + new_branch.get_dag_current_leaf_number()?, + ) - 1; + + let mut retracted = vec![]; + let mut enacted = vec![]; + + let snapshot = + new_branch.get_dag_accumulator_snapshot(new_branch.head_block().header().id())?; + let mut children = snapshot.child_hashes.clone(); + children.sort(); + for child in children { + match self.storage.get_block(child)? 
{ + Some(block) => enacted.push(block), + None => bail!( + "the block{} dose not exist in new branch, ignore", + child.clone() + ), + } + } + enacted.reverse(); + + loop { + if min_leaf_index == 0 { + break; + } + let main_snapshot = self + .main + .get_dag_accumulator_snapshot_by_index(min_leaf_index)?; + let new_branch_snapshot = + new_branch.get_dag_accumulator_snapshot_by_index(min_leaf_index)?; + + if main_snapshot.accumulator_info.get_accumulator_root() + == new_branch_snapshot.accumulator_info.get_accumulator_root() + { + break; + } + + let mut temp_retracted = vec![]; + temp_retracted.extend( + main_snapshot + .child_hashes + .iter() + .try_fold(Vec::::new(), |mut rollback_blocks, child| { + let block = self.storage.get_block(child.clone()); + if let anyhow::Result::Ok(Some(block)) = block { + rollback_blocks.push(block); + } else { + bail!( + "the block{} dose not exist in main branch, ignore", + child.clone() + ); + } + return Ok(rollback_blocks); + })? + .into_iter(), + ); + temp_retracted.sort_by(|a, b| b.header().id().cmp(&a.header().id())); + retracted.extend(temp_retracted.into_iter()); + + let mut temp_enacted = vec![]; + temp_enacted.extend( + new_branch_snapshot + .child_hashes + .iter() + .try_fold(Vec::::new(), |mut rollback_blocks, child| { + let block = self.storage.get_block(child.clone()); + if let anyhow::Result::Ok(Some(block)) = block { + rollback_blocks.push(block); + } else { + bail!( + "the block{} dose not exist in new branch, ignore", + child.clone() + ); + } + return Ok(rollback_blocks); + })? + .into_iter(), + ); + temp_enacted.sort_by(|a, b| b.header().id().cmp(&a.header().id())); + enacted.extend(temp_enacted.into_iter()); + + min_leaf_index = min_leaf_index.saturating_sub(1); + } + enacted.reverse(); + retracted.reverse(); + Ok(( + enacted.len() as u64, + enacted, + retracted.len() as u64, + retracted, + )) + } + fn find_ancestors_from_accumulator( &self, new_branch: &BlockChain, @@ -381,7 +636,10 @@ where .inc() } - if let Err(e) = self.bus.broadcast(NewHeadBlock(Arc::new(block))) { + if let Err(e) = self + .bus + .broadcast(NewHeadBlock(Arc::new(block))) + { error!("Broadcast NewHeadBlock error: {:?}", e); } } @@ -398,57 +656,299 @@ where } } - fn connect_inner(&mut self, block: Block) -> Result { - let block_id = block.id(); - if block_id == *starcoin_storage::BARNARD_HARD_FORK_HASH - && block.header().number() == starcoin_storage::BARNARD_HARD_FORK_HEIGHT - { - debug!("barnard hard fork {}", block_id); - return Err(ConnectBlockError::BarnardHardFork(Box::new(block)).into()); - } - if self.main.current_header().id() == block_id { - debug!("Repeat connect, current header is {} already.", block_id); - return Ok(ConnectOk::Duplicate); - } - if self.main.current_header().id() == block.header().parent_hash() - && !self.block_exist(block_id)? - { - let executed_block = self.main.apply(block)?; - let enacted_blocks = vec![executed_block.block().clone()]; - self.do_new_head(executed_block, 1, enacted_blocks, 0, vec![])?; - return Ok(ConnectOk::ExeConnectMain); - } - let (block_info, fork) = self.find_or_fork(block.header())?; + fn switch_branch(&mut self, block: Block) -> Result { + let (block_info, fork) = self.find_or_fork( + block.header(), + dag_block_next_parent, + dag_block_parents.clone(), + )?; match (block_info, fork) { //block has been processed in some branch, so just trigger a head selection. 
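The arms below pair the optional BlockInfo from storage with the optional forked chain from find_or_fork; as a compact reference, a runnable sketch of that four-way dispatch, using placeholder types rather than the real starcoin ones:

// Placeholder stand-ins for the real starcoin types.
struct BlockInfo;
struct BlockChain;

// The four outcomes find_or_fork can produce, as matched in switch_branch.
enum Disposition {
    TriggerHeadSelection, // (Some(info), Some(branch)): already executed on a branch
    ConnectToMain,        // (Some(info), None): already executed, parent is the main head
    ExecuteOnBranch,      // (None, Some(branch)): parent known, execute on that branch
    FutureBlock,          // (None, None): parent unknown, reject as a future block
}

fn dispose(found: (Option<BlockInfo>, Option<BlockChain>)) -> Disposition {
    match found {
        (Some(_), Some(_)) => Disposition::TriggerHeadSelection,
        (Some(_), None) => Disposition::ConnectToMain,
        (None, Some(_)) => Disposition::ExecuteOnBranch,
        (None, None) => Disposition::FutureBlock,
    }
}

fn main() {
    assert!(matches!(dispose((None, None)), Disposition::FutureBlock));
}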
- (Some(_block_info), Some(branch)) => { + (Some(block_info), Some(branch)) => { + // both are different, select one debug!( "Block {} has been processed, trigger head selection, total_difficulty: {}", - block_id, + block_info.block_id(), branch.get_total_difficulty()? ); + let exe_block = branch.head_block(); self.select_head(branch)?; - Ok(ConnectOk::Duplicate) + if let Some(new_tips) = next_tips { + new_tips.push(block_info.block_id().clone()); + } + Ok(ConnectOk::Duplicate(exe_block)) } //block has been processed, and its parent is main chain, so just connect it to main chain. (Some(block_info), None) => { + // both are identical + let block_id: HashValue = block_info.block_id().clone(); let executed_block = self.main.connect(ExecutedBlock { block: block.clone(), block_info, })?; info!( "Block {} main has been processed, trigger head selection", - block_id + block_id, ); - self.do_new_head(executed_block, 1, vec![block], 0, vec![])?; - Ok(ConnectOk::Connect) + self.do_new_head(executed_block.clone(), 1, vec![block], 0, vec![])?; + Ok(ConnectOk::Connect(executed_block)) } (None, Some(mut branch)) => { - let _executed_block = branch.apply(block)?; + // the block is not in the block, but the parent is + let result = branch.apply(block); + let executed_block = result?; self.select_head(branch)?; - Ok(ConnectOk::ExeConnectBranch) + Ok(ConnectOk::ExeConnectBranch(executed_block)) } (None, None) => Err(ConnectBlockError::FutureBlock(Box::new(block)).into()), } } + + fn connect_to_main(&mut self, block: Block) -> Result { + let block_id = block.id(); + if block_id == *starcoin_storage::BARNARD_HARD_FORK_HASH + && block.header().number() == starcoin_storage::BARNARD_HARD_FORK_HEIGHT + { + debug!("barnard hard fork {}", block_id); + return Err(ConnectBlockError::BarnardHardFork(Box::new(block)).into()); + } + if self.main.current_header().id() == block_id { + debug!("Repeat connect, current header is {} already.", block_id); + return Ok(ConnectOk::MainDuplicate); + } + + if self.main.current_header().id() == block.header().parent_hash() + && !self.block_exist(block_id)? + { + return self.apply_and_select_head(block); + } + // todo: should switch dag together + self.switch_branch(block) + } + + fn apply_and_select_head(&mut self, block: Block) -> Result { + let executed_block = self.main.apply(block)?; + let enacted_blocks = vec![executed_block.block().clone()]; + self.do_new_head(executed_block.clone(), 1, enacted_blocks, 0, vec![])?; + return Ok(ConnectOk::ExeConnectMain(executed_block)); + } + + fn add_to_dag(&mut self, header: &BlockHeader) -> Result> { + let dag = self.dag.as_mut().expect("dag must be inited before using"); + match dag + .lock() + .expect("dag must be inited before using") + .get_ghostdag_data(header.id()) + { + std::result::Result::Ok(ghost_dag_data) => Ok(ghost_dag_data), + Err(_) => std::result::Result::Ok(Arc::new( + dag.lock() + .expect("failed to lock the dag") + .add_to_dag(DagHeader::new(header.clone()))?, + )), + } + } + + fn connect_dag_inner(&mut self, block: Block) -> Result { + let add_dag_result = async_std::task::block_on(self.flexidag_service.send(AddToDag { + block_header: block.header().clone(), + }))??; + let selected_parent = self + .storage + .get_block_by_hash(add_dag_result.selected_parent)? 
+ .expect("selected parent should in storage"); + let mut chain = self.main.fork(selected_parent.header.parent_hash())?; + let mut transaction_parent = chain.status().head().id().clone(); + for blue_hash in add_dag_result.mergeset_blues.mergeset_blues.iter() { + if let Some(blue_block) = self.storage.get_block(blue_hash.to_owned())? { + match chain.apply(blue_block) { + Ok(executed_block) => transaction_parent = executed_block, + Err(_) => warn!("failed to connect dag block: {:?}", e), + } + } else { + error!("Failed to get block {:?}", blue_hash); + return Ok(ConnectOk::DagConnectMissingBlock); + } + } + // select new head and update startup info(main but dag main) + self.select_head_for_dag(chain)?; + Ok(ConnectOk::DagConnected(KTotalDifficulty { + head_block_id: self.main.status().head().id(), + total_difficulty: self.main.status().info().get_total_difficulty(), + })) + } + + fn select_head_for_dag(&self, new_chain: BlockChain) -> Result<()> { + if new_chain.status().info.get_total_difficulty() + > self.main.status().info.get_total_difficulty() + { + let new_head_block = new_chain.head_block(); + self.update_startup_info(new_head_block.header())?; + self.main = new_chain; + self.broadcast_new_head(new_head_block); + } + + Ok(()) + } + + pub fn dump_tips(&self, block_header: BlockHeader) -> Result<()> { + if block_header.number() < self.storage.dag_fork_height(self.config.net().id().clone()) { + Ok(()) + } else { + self.flexidag_service.send(DumpTipsToAccumulator { + block_header, + current_head_block_id: self.main.status().head().id().clone(), + k_total_difficulty: KTotalDifficulty { + head_block_id: self.main.status().info().id(), + total_difficulty: self.main.status().info().get_total_difficulty(), + }, + }) + } + } + + pub fn update_tips(&self, block_header: BlockHeader) -> Result<()> { + if block_header.number() >= self.storage.dag_fork_height(self.config.net().id().clone()) { + self.flexidag_service.send(UpdateDagTips { + block_header, + current_head_block_id: self.main.status().head().id().clone(), + k_total_difficulty: KTotalDifficulty { + head_block_id: self.main.status().head().id().clone(), + total_difficulty: self.main.status().info().get_total_difficulty(), + }, + }) + } else { + Ok(()) // nothing to do + } + } + + fn connect_inner(&mut self, block: Block) -> Result { + let block_id = block.id(); + if block_id == *starcoin_storage::BARNARD_HARD_FORK_HASH + && block.header().number() == starcoin_storage::BARNARD_HARD_FORK_HEIGHT + { + debug!("barnard hard fork {}", block_id); + return Err(ConnectBlockError::BarnardHardFork(Box::new(block)).into()); + } + if self.main.current_header().id() == block_id { + debug!("Repeat connect, current header is {} already.", block_id); + return Ok(ConnectOk::MainDuplicate); + } + // normal block, just connect to main + // let mut next_tips = Some(vec![]); + let executed_block = self.connect_to_main(block)?.clone(); + if let Some(block) = executed_block.block() { + self.broadcast_new_head(block.clone()); + } + return Ok(executed_block); + } + + #[cfg(test)] + pub fn execute_dag_block_pool(&mut self) -> Result { + let mut dag_blocks = self.dag_block_pool.lock().unwrap().clone(); + self.dag_block_pool.lock().unwrap().clear(); + return self.execute_dag_block_in_pool( + dag_blocks, + self.main + .status() + .tips_hash + .expect("dag block must has current tips") + .clone(), + ); + } + + // pub fn execute_dag_block_in_pool( + // &mut self, + // mut dag_blocks: Vec<(Block, Vec)>, + // current_tips: Vec, + // ) -> Result { + // // 3, process the 
blocks that are got from the pool + // // sort by id + // dag_blocks.sort_by_key(|(block, _)| block.header().id()); + + // let mut dag_block_next_parent = current_tips + // .iter() + // .max() + // .expect("tips must be larger than 0") + // .clone(); + // let mut next_tips = Some(vec![]); + // let mut executed_blocks = vec![]; + // // connect the block one by one + // dag_blocks + // .into_iter() + // .try_fold((), |_, (block, dag_block_parents)| { + // let next_transaction_parent = block.header().id(); + // let result = self.connect_to_main( + // block, + // Some(dag_block_parents.clone()), + // Some(dag_block_next_parent), + // &mut next_tips, + // ); + // match result { + // std::result::Result::Ok(connect_ok) => { + // executed_blocks.push((connect_ok.block().clone(), dag_block_parents)); + // dag_block_next_parent = next_transaction_parent; + // Ok(()) + // } + // Err(error) => { + // bail!("apply_and_select_head failed, error: {}", error.to_string()) + // } + // } + // })?; + + // match next_tips { + // Some(new_tips) => { + // if new_tips.is_empty() { + // bail!("no new block has been executed successfully!"); + // } + + // let mut connected = self.main.is_head_of_dag_accumulator(new_tips.clone())?; + // if self.main.dag_parents_in_tips(new_tips.clone())? { + // // 1, write to disc + // if !connected { + // self.main.append_dag_accumulator_leaf(new_tips.clone())?; + // connected = true; + // } + // } + + // if connected { + // // 2, broadcast the blocks sorted by their id + // executed_blocks + // .iter() + // .for_each(|(exe_block, dag_block_parents)| { + // if let Some(block) = exe_block { + // self.broadcast_new_head( + // block.clone(), + // Some(dag_block_parents.clone()), + // Some(new_tips.clone()), + // ); + // } + // }); + // } + + // return executed_blocks + // .last() + // .map(|(exe_block, _)| { + // if connected { + // ConnectOk::ExeConnectMain( + // exe_block + // .as_ref() + // .expect("exe block should not be None!") + // .clone(), + // ) + // } else { + // ConnectOk::ExeConnectBranch( + // exe_block + // .as_ref() + // .expect("exe block should not be None!") + // .clone(), + // ) + // } + // }) + // .ok_or_else(|| format_err!("no block has been executed successfully!")); + // } + // None => { + // unreachable!("next tips should not be None"); + // } + // }; + // } } diff --git a/sync/src/sync.rs b/sync/src/sync.rs index dd4bb57f3c..367896773a 100644 --- a/sync/src/sync.rs +++ b/sync/src/sync.rs @@ -3,34 +3,41 @@ use crate::block_connector::BlockConnectorService; use crate::sync_metrics::SyncMetrics; -use crate::tasks::{full_sync_task, AncestorEvent, SyncFetcher}; +use crate::tasks::{full_sync_task, sync_dag_full_task, AncestorEvent, SyncFetcher}; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; -use anyhow::{format_err, Result}; +use anyhow::{format_err, Ok, Result}; +use futures::executor::block_on; use futures::FutureExt; use futures_timer::Delay; use network_api::peer_score::PeerScoreMetrics; use network_api::{PeerId, PeerProvider, PeerSelector, PeerStrategy, ReputationChange}; +use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_chain::BlockChain; -use starcoin_chain_api::ChainReader; +use starcoin_chain_api::{ChainReader, ChainWriter}; use starcoin_config::NodeConfig; +use starcoin_consensus::BlockDAG; use starcoin_executor::VMMetrics; +use starcoin_flexidag::flexidag_service::GetDagAccumulatorInfo; +use starcoin_flexidag::{flexidag_service, FlexidagService}; use starcoin_logger::prelude::*; use 
starcoin_network::NetworkServiceRef; use starcoin_network::PeerEvent; use starcoin_service_registry::{ - ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, + ActorService, EventHandler, ServiceContext, ServiceFactory, ServiceHandler, ServiceRef, }; use starcoin_storage::block_info::BlockInfoStore; -use starcoin_storage::{BlockStore, Storage}; +use starcoin_storage::{BlockStore, Storage, Store, SyncFlexiDagStore}; use starcoin_sync_api::{ PeerScoreRequest, PeerScoreResponse, SyncCancelRequest, SyncProgressReport, SyncProgressRequest, SyncServiceHandler, SyncStartRequest, SyncStatusRequest, SyncTarget, }; +use starcoin_txpool::TxPoolService; use starcoin_types::block::BlockIdAndNumber; use starcoin_types::startup_info::ChainStatus; use starcoin_types::sync_status::SyncStatus; use starcoin_types::system_events::{NewHeadBlock, SyncStatusChangeEvent, SystemStarted}; -use std::sync::Arc; +use std::result::Result::Ok; +use std::sync::{Arc, Mutex}; use std::time::Duration; use stream_task::{TaskError, TaskEventCounterHandle, TaskHandle}; @@ -60,6 +67,7 @@ pub struct SyncService { storage: Arc, metrics: Option, peer_score_metrics: Option, + flexidag_service: ServiceRef, vm_metrics: Option, } @@ -67,6 +75,7 @@ impl SyncService { pub fn new( config: Arc, storage: Arc, + flexidag_service: ServiceRef, vm_metrics: Option, ) -> Result { let startup_info = storage @@ -88,13 +97,30 @@ impl SyncService { .metrics .registry() .and_then(|registry| PeerScoreMetrics::register(registry).ok()); + // let genesis = storage + // .get_genesis()? + // .ok_or_else(|| format_err!("Can not find genesis hash in storage."))?; + let dag_accumulator_info = match storage.get_dag_accumulator_info()? { + Some(info) => Some(info), + None => { + warn!( + "Can not find dag accumulator info by head block id: {}, use genesis info.", + head_block_info.block_id(), + ); + None + } + }; Ok(Self { - sync_status: SyncStatus::new(ChainStatus::new(head_block.header, head_block_info)), + sync_status: SyncStatus::new( + ChainStatus::new(head_block.header.clone(), head_block_info), + dag_accumulator_info, + ), stage: SyncStage::NotStart, config, storage, metrics, peer_score_metrics, + flexidag_service, vm_metrics, }) } @@ -142,13 +168,31 @@ impl SyncService { } let network = ctx.get_shared::()?; + let block_chain_service = ctx + .service_ref::>()? + .clone(); let storage = self.storage.clone(); let self_ref = ctx.self_ref(); - let connector_service = ctx.service_ref::()?.clone(); + let connector_service = ctx + .service_ref::>()? + .clone(); let config = self.config.clone(); let peer_score_metrics = self.peer_score_metrics.clone(); let sync_metrics = self.metrics.clone(); let vm_metrics = self.vm_metrics.clone(); + + let dag_accumulator_store = ctx + .get_shared::>() + .expect("storage must exist") + .get_accumulator_store(AccumulatorStoreType::SyncDag); + let dag_accumulator_snapshot = ctx + .get_shared::>() + .expect("storage must exist") + .get_accumulator_snapshot_storage(); + + let dag = ctx.get_shared::>>()?; + + let test_storage = storage.clone(); let fut = async move { let peer_select_strategy = peer_strategy.unwrap_or_else(|| config.sync.peer_select_strategy()); @@ -217,39 +261,88 @@ impl SyncService { peer_selector.clone(), network.clone(), )); - if let Some(target) = - rpc_client.get_best_target(current_block_info.get_total_difficulty())? 
- { - info!("[sync] Find target({}), total_difficulty:{}, current head({})'s total_difficulty({})", target.target_id.id(), target.block_info.total_difficulty, current_block_id, current_block_info.total_difficulty); - - let (fut, task_handle, task_event_handle) = full_sync_task( - current_block_id, - target.clone(), - skip_pow_verify, - config.net().time_service(), - storage.clone(), - connector_service.clone(), - rpc_client.clone(), - self_ref.clone(), - network.clone(), - config.sync.max_retry_times(), - sync_metrics.clone(), - vm_metrics.clone(), - )?; - - self_ref.notify(SyncBeginEvent { - target, - task_handle, - task_event_handle, - peer_selector, - })?; - if let Some(sync_task_total) = sync_task_total.as_ref() { - sync_task_total.with_label_values(&["start"]).inc(); + + let op_local_dag_accumulator_info = + self.flexidag_service.send(GetDagAccumulatorInfo).await??; + + if let Some(local_dag_accumulator_info) = op_local_dag_accumulator_info { + let dag_sync_futs = rpc_client + .get_dag_targets(current_block_info.get_total_difficulty(), local_dag_accumulator_info.get_num_leaves())? + .into_iter() + .fold(Ok(vec![]), |mut futs, target_accumulator_infos| { + let (fut, task_handle, task_event_handle) = sync_dag_full_task( + local_dag_accumulator_info, + target_accumulator_info, + rpc_client.clone(), + dag_accumulator_store, + dag_accumulator_snapshot, + storage.clone(), + config.net().time_service(), + vm_metrics.clone(), + connector_service.clone(), + network.clone(), + skip_pow_verify, + dag.clone(), + block_chain_service.clone(), + config.net().id().clone(), + )?; + self_ref.notify(SyncBeginEvent { + target, + task_handle, + task_event_handle, + peer_selector, + })?; + if let Some(sync_task_total) = sync_task_total.as_ref() { + sync_task_total.with_label_values(&["start"]).inc(); + } + futs.and_then(|v| v.push(fut)) + })? + .into_iter() + .fold(Ok(vec![]), |chain, fut| Ok(vec![fut.await?]))?; + assert!(dag_sync_futs.len() <= 1); + if dag_sync_futs.len() == 1 { + Ok(Some(dag_sync_futs[0])) + } else { + debug!("[sync]No best peer to request, current is beast."); + Ok(None) } - Ok(Some(fut.await?)) } else { - debug!("[sync]No best peer to request, current is beast."); - Ok(None) + if let Some((target, _)) = + rpc_client.get_best_target(current_block_info.get_total_difficulty())? 
+ { + info!("[sync] Find target({}), total_difficulty:{}, current head({})'s total_difficulty({})", target.target_id.id(), target.block_info.total_difficulty, current_block_id, current_block_info.total_difficulty); + + let (fut, task_handle, task_event_handle) = full_sync_task( + current_block_id, + target.clone(), + skip_pow_verify, + config.net().time_service(), + storage.clone(), + connector_service.clone(), + rpc_client.clone(), + self_ref.clone(), + network.clone(), + config.sync.max_retry_times(), + block_chain_service.clone(), + config.net().id().clone(), + sync_metrics.clone(), + vm_metrics.clone(), + )?; + + self_ref.notify(SyncBeginEvent { + target, + task_handle, + task_event_handle, + peer_selector, + })?; + if let Some(sync_task_total) = sync_task_total.as_ref() { + sync_task_total.with_label_values(&["start"]).inc(); + } + Ok(Some(fut.await?)) + } else { + debug!("[sync]No best peer to request, current is beast."); + Ok(None) + } } }; let network = ctx.get_shared::()?; @@ -268,7 +361,18 @@ impl SyncService { |result: Result, anyhow::Error>| async move { let cancel = match result { Ok(Some(chain)) => { - info!("[sync] Sync to latest block: {:?}", chain.current_header()); + info!("[sync] Sync to latest block: {:?}", chain.status()); + info!("[sync] Sync to latest accumulator info: {:?}", chain.get_current_dag_accumulator_info()); + + let startup_info = test_storage + .get_startup_info().unwrap() + .ok_or_else(|| format_err!("Startup info should exist.")).unwrap(); + let current_block_id = startup_info.main; + + let local_dag_accumulator_info = test_storage + .get_dag_accumulator_info().unwrap() + .expect("current dag accumulator info should exist"); + if let Some(sync_task_total) = sync_task_total.as_ref() { sync_task_total.with_label_values(&["done"]).inc(); } @@ -365,8 +469,9 @@ impl ServiceFactory for SyncService { fn create(ctx: &mut ServiceContext) -> Result { let config = ctx.get_shared::>()?; let storage = ctx.get_shared::>()?; + let flexidag_service = ctx.service_ref::()?.clone(); let vm_metrics = ctx.get_shared_opt::()?; - Self::new(config, storage, vm_metrics) + Self::new(config, storage, flexidag_service, vm_metrics) } } @@ -520,6 +625,8 @@ impl CheckSyncEvent { impl EventHandler for SyncService { fn handle_event(&mut self, msg: CheckSyncEvent, ctx: &mut ServiceContext) { + // comment temporarily, for the dag branch, starcoin will sync dag only + // it will add some logic to determine which part to sync in the future if let Err(e) = self.check_and_start_sync(msg.peers, msg.skip_pow_verify, msg.strategy, ctx) { error!("[sync] Check sync error: {:?}", e); @@ -579,6 +686,11 @@ impl EventHandler for SyncService { block.header().clone(), block.block_info.clone(), )) { + self.sync_status.update_dag_accumulator_info( + self.storage + .get_dag_accumulator_info() + .expect("dag accumulator info must exist"), + ); ctx.broadcast(SyncStatusChangeEvent(self.sync_status.clone())); } } diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 57f6703a9d..7f0e0cdaba 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -1,38 +1,62 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::tasks::{BlockConnectedEvent, BlockConnectedEventHandle, BlockFetcher, BlockLocalStore}; +use crate::tasks::{BlockConnectedEventHandle, BlockFetcher, BlockLocalStore}; use crate::verified_rpc_client::RpcVerifyError; -use anyhow::{format_err, Result}; +use anyhow::{format_err, Ok, Result}; 
use futures::future::BoxFuture; use futures::FutureExt; use network_api::PeerId; use network_api::PeerProvider; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::{verifier::BasicVerifier, BlockChain}; use starcoin_chain_api::{ChainReader, ChainWriter, ConnectBlockError, ExecutedBlock}; -use starcoin_config::G_CRATE_VERSION; +use starcoin_config::{Connect, G_CRATE_VERSION}; +use starcoin_consensus::BlockDAG; +use starcoin_crypto::HashValue; +use starcoin_flexidag::flexidag_service::{AddToDag, GetDagTips, ForkDagAccumulator, FinishSync}; +use starcoin_flexidag::FlexidagService; use starcoin_logger::prelude::*; +use starcoin_service_registry::ServiceRef; use starcoin_storage::BARNARD_HARD_FORK_HASH; use starcoin_sync_api::SyncTarget; use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; +use std::time::Duration; use stream_task::{CollectorState, TaskError, TaskResultCollector, TaskState}; +use super::{BlockConnectAction, BlockConnectedEvent, BlockConnectedFinishEvent}; + #[derive(Clone, Debug)] pub struct SyncBlockData { pub(crate) block: Block, pub(crate) info: Option, pub(crate) peer_id: Option, + pub(crate) accumulator_root: Option, // the block belongs to this dag accumulator leaf + pub(crate) count_in_leaf: u64, // the count of the block in the dag accumulator leaf + pub(crate) dag_accumulator_index: Option, // the index of the accumulator leaf which the block belogs to } + + impl SyncBlockData { - pub fn new(block: Block, block_info: Option, peer_id: Option) -> Self { + pub fn new( + block: Block, + block_info: Option, + peer_id: Option, + accumulator_root: Option, + count_in_leaf: u64, + dag_acccumulator_index: Option, + ) -> Self { Self { block, info: block_info, peer_id, + accumulator_root, + count_in_leaf, + dag_accumulator_index, } } } @@ -125,8 +149,11 @@ impl TaskState for BlockSyncTask { .fetch_blocks(no_exist_block_ids) .await? .into_iter() - .fold(result_map, |mut result_map, (block, peer_id)| { - result_map.insert(block.id(), SyncBlockData::new(block, None, peer_id)); + .fold(result_map, |mut result_map, (block, peer_id, _, _)| { + result_map.insert( + block.id(), + SyncBlockData::new(block, None, peer_id, None, 1, None), + ); result_map }) }; @@ -146,7 +173,9 @@ impl TaskState for BlockSyncTask { .fetch_blocks(block_ids) .await? 
.into_iter() - .map(|(block, peer_id)| SyncBlockData::new(block, None, peer_id)) + .map(|(block, peer_id, _, _)| { + SyncBlockData::new(block, None, peer_id, None, 1, None) + }) .collect()) } } @@ -181,12 +210,17 @@ impl TaskState for BlockSyncTask { pub struct BlockCollector { //node's current block info current_block_info: BlockInfo, - target: SyncTarget, + target: Option, // single chain use only // the block chain init by ancestor chain: BlockChain, event_handle: H, peer_provider: N, skip_pow_verify: bool, + last_accumulator_root: HashValue, + dag_block_pool: Vec, + target_accumulator_root: HashValue, + flexidag_service: ServiceRef, + new_dag_accumulator_info: Option, } impl BlockCollector @@ -196,12 +230,19 @@ where { pub fn new_with_handle( current_block_info: BlockInfo, - target: SyncTarget, + target: Option, chain: BlockChain, event_handle: H, peer_provider: N, skip_pow_verify: bool, + target_accumulator_root: HashValue, + flexidag_service: ServiceRef, ) -> Self { + if let Some(dag) = &dag { + dag.lock() + .expect("failed to lock the dag") + .clear_missing_block(); + } Self { current_block_info, target, @@ -209,14 +250,91 @@ where event_handle, peer_provider, skip_pow_verify, + last_accumulator_root: HashValue::zero(), + dag_block_pool: Vec::new(), + target_accumulator_root, + flexidag_service, + new_dag_accumulator_info: None, } } + pub fn check_if_became_dag(&self) -> Result { + Ok(async_std::task::block_on(self.flexidag_service.send(GetDagTips))??.is_some()) + } + #[cfg(test)] - pub fn apply_block_for_test(&mut self, block: Block) -> Result<()> { + pub fn apply_block_for_test( + &mut self, + block: Block, + parents_hash: Option>, + next_tips: &mut Option>, + ) -> Result<()> { self.apply_block(block, None) } + fn notify_connected_block( + &mut self, + block: Block, + block_info: BlockInfo, + action: BlockConnectAction, + state: CollectorState, + ) -> Result { + let total_difficulty = block_info.get_total_difficulty(); + + // if the new block's total difficulty is smaller than the current, + // do nothing because we do not need to update the current chain in any other services. + if total_difficulty <= self.current_block_info.total_difficulty { + return Ok(state); // nothing to do + } + + // only try connect block when sync chain total_difficulty > node's current chain. + + // first, create the sender and receiver for ensuring that + // the last block is connected before the next synchronization is triggered. + // if the block is not the last one, we do not want to do this. + let (sender, mut receiver) = match state { + CollectorState::Enough => { + let (s, r) = futures::channel::mpsc::unbounded::(); + (Some(s), Some(r)) + } + CollectorState::Need => (None, None), + }; + + // second, construct the block connect event. + let block_connect_event = BlockConnectedEvent { + block, + feedback: sender, + action, + }; + + // third, broadcast it. + if let Err(e) = self.event_handle.handle(block_connect_event.clone()) { + error!( + "Send BlockConnectedEvent error: {:?}, block_id: {}", + e, + block_info.block_id() + ); + } + + // finally, if it is the last one, wait for the last block to be processed. 
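A runnable model of the feedback handshake implemented in this function, with the event bus replaced by a bare futures channel: the collector attaches a sender only to the final block, broadcasts, then polls the receiver a bounded number of times so a lost acknowledgement cannot stall sync forever (the 10-second sleep is shortened here for illustration):

use futures::channel::mpsc;
use std::time::Duration;

struct BlockConnectedFinishEvent;

fn main() {
    let (sender, mut receiver) = mpsc::unbounded::<BlockConnectedFinishEvent>();

    // The connector-service side: acknowledge once the block is applied.
    sender.unbounded_send(BlockConnectedFinishEvent).unwrap();

    // The collector side: bounded wait, at most three polls.
    let mut count: i32 = 0;
    while count < 3 {
        count = count.saturating_add(1);
        match receiver.try_next() {
            Ok(_) => break,
            Err(_) => {
                println!("waiting for the last block to be processed");
                std::thread::sleep(Duration::from_millis(10));
            }
        }
    }
}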
+ if block_connect_event.feedback.is_some() && receiver.is_some() { + let mut count: i32 = 0; + while count < 3 { + count = count.saturating_add(1); + match receiver.as_mut().unwrap().try_next() { + std::result::Result::Ok(_) => { + break; + } + Err(_) => { + info!("Waiting for last block to be processed"); + async_std::task::block_on(async_std::task::sleep(Duration::from_secs(10))); + } + } + } + } + Ok(state) + } + fn apply_block(&mut self, block: Block, peer_id: Option) -> Result<()> { if let Some((_failed_block, pre_peer_id, err, version)) = self .chain @@ -257,7 +375,7 @@ where error_msg, peer_id ); match err.downcast::() { - Ok(connect_error) => match connect_error { + std::result::Result::Ok(connect_error) => match connect_error { ConnectBlockError::FutureBlock(block) => { Err(ConnectBlockError::FutureBlock(block).into()) } @@ -282,69 +400,192 @@ where Ok(()) } } -} -impl TaskResultCollector for BlockCollector -where - N: PeerProvider + 'static, - H: BlockConnectedEventHandle + 'static, -{ - type Output = BlockChain; + fn broadcast_dag_chain_block( + &mut self, + broadcast_blocks: Vec<(Block, BlockInfo, BlockConnectAction)>, + start_index: u64, + ) -> Result { + let state = if self.last_accumulator_root == self.target_accumulator_root { + CollectorState::Enough + } else { + CollectorState::Need + }; - fn collect(&mut self, item: SyncBlockData) -> Result { + self.new_dag_accumulator_info = Some(async_std::task::block_on(self.flexidag_service.send(ForkDagAccumulator { + new_blocks: broadcast_blocks.into_iter().map(|(block, _, _)| block.id()).collect(), + dag_accumulator_index: start_index, + block_header_id: self.chain.head_block().id(), + }))??); + if state == State::Enough { + async_std::task::block_on(self.flexidag_service.send(FinishSync { + dag_accumulator_info: self.new_dag_accumulator_info.clone(), + }))?? + } + return Ok(state); + } + + fn broadcast_single_chain_block( + &mut self, + block: Block, + block_info: BlockInfo, + action: BlockConnectAction, + ) -> Result { + let target = self + .target + .as_ref() + .expect("the process is for single chain"); + let state = if block_info.block_accumulator_info.num_leaves + == target.block_info.block_accumulator_info.num_leaves + { + if block_info != target.block_info { + Err(TaskError::BreakError( + RpcVerifyError::new_with_peers( + target.peers.clone(), + format!( + "Verify target error, expect target: {:?}, collect target block_info:{:?}", + target.block_info, + block_info + ), + ) + .into(), + ) + .into()) + } else { + Ok(CollectorState::Enough) + } + } else { + Ok(CollectorState::Need) + }; + + let result = self.notify_connected_block(block, block_info, action, state?); + match result { + Ok(state) => {} + Err(e) => { + error!("notify connected block error: {:?}", e); + Err(e) + } + } + } + + fn collect_dag_item(&mut self, item: SyncBlockData) -> Result<()> { + let (block, block_info, peer_id) = item.into(); + let block_id = block.id(); + let timestamp = block.header().timestamp(); + + let add_dag_result = async_std::task::block_on(self.flexidag_service.send(AddToDag { + block_header: block.header().clone(), + }))??; + let selected_parent = self + .storage + .get_block_by_hash(add_dag_result.selected_parent)? + .expect("selected parent should in storage"); + let mut chain = self.chain.fork(selected_parent.header.parent_hash())?; + for blue_hash in add_dag_result.mergeset_blues.mergeset_blues.iter() { + if let Some(blue_block) = self.storage.get_block(blue_hash.to_owned())? 
{ + match chain.apply(blue_block) { + Ok(_executed_block) => (), + Err(e) => warn!("failed to connect dag block: {:?}", e), + } + } else { + error!("Failed to get block {:?}", blue_hash); + } + } + + if chain.status().info().total_difficulty > self.chain.status().info().total_difficulty { + self.chain = chain; + } + + Ok(()) + } + + fn collect_item( + &mut self, + item: SyncBlockData, + ) -> Result<(Block, BlockInfo, BlockConnectAction)> { let (block, block_info, peer_id) = item.into(); let block_id = block.id(); let timestamp = block.header().timestamp(); - let block_info = match block_info { + + return match block_info { Some(block_info) => { - //If block_info exists, it means that this block was already executed and try connect in the previous sync, but the sync task was interrupted. + //If block_info exists, it means that this block was already executed and + // try connect in the previous sync, but the sync task was interrupted. //So, we just need to update chain and continue self.chain.connect(ExecutedBlock { - block, + block: block.clone(), block_info: block_info.clone(), })?; - block_info + let block_info = self.chain.status().info; + Ok((block, block_info, BlockConnectAction::ConnectExecutedBlock)) } None => { self.apply_block(block.clone(), peer_id)?; self.chain.time_service().adjust(timestamp); let block_info = self.chain.status().info; - let total_difficulty = block_info.get_total_difficulty(); - // only try connect block when sync chain total_difficulty > node's current chain. - if total_difficulty > self.current_block_info.total_difficulty { - if let Err(e) = self.event_handle.handle(BlockConnectedEvent { block }) { - error!( - "Send BlockConnectedEvent error: {:?}, block_id: {}", - e, block_id - ); - } - } - block_info + Ok((block, block_info, BlockConnectAction::ConnectNewBlock)) } }; + } +} - //verify target - if block_info.block_accumulator_info.num_leaves - == self.target.block_info.block_accumulator_info.num_leaves - { - if block_info != self.target.block_info { - Err(TaskError::BreakError( - RpcVerifyError::new_with_peers( - self.target.peers.clone(), - format!( - "Verify target error, expect target: {:?}, collect target block_info:{:?}", - self.target.block_info, - block_info - ), - ) - .into(), - ) - .into()) +impl TaskResultCollector for BlockCollector +where + N: PeerProvider + 'static, + H: BlockConnectedEventHandle + 'static, +{ + type Output = BlockChain; + + fn collect(&mut self, item: SyncBlockData) -> Result { + let mut process_block_pool = vec![]; + if item.accumulator_root.is_some() { + // it is a flexidag + self.dag_block_pool.push(item.clone()); + self.last_accumulator_root = item.accumulator_root.unwrap(); + + if item.count_in_leaf != self.dag_block_pool.len() as u64 { + return Ok(CollectorState::Need); } else { - Ok(CollectorState::Enough) + process_block_pool = std::mem::take(&mut self.dag_block_pool); } } else { - Ok(CollectorState::Need) + // it is a single chain + process_block_pool.push(item); + } + + assert!(!process_block_pool.is_empty()); + + // let mut next_tips = Some(vec![]); + let mut block_to_broadcast = vec![]; + if item.accumulator_root.is_some() { + for item in process_block_pool { + self.collect_dag_item(item)? + } + } else { + for item in process_block_pool { + block_to_broadcast.push(self.collect_item(item)?) + } + } + //verify target + match self.target { + Some(_) => { + assert_eq!( + block_to_broadcast.len(), + 1, + "in single chain , block_info should exist!" 
+ ); + let (block, block_info, _, action) = block_to_broadcast.pop().unwrap(); + // self.check_if_sync_complete(block_info) + match self.broadcast_single_chain_block(block, block_info, action) { + Ok(_) => { + if self.check_if_became_dag()? { + Ok(CollectorState::Enough) + } + } + Err(e) => Err(e), + } + } + None => self.broadcast_dag_chain_block(block_to_broadcast, item.dag_accumulator_index), } } diff --git a/sync/src/tasks/inner_sync_task.rs b/sync/src/tasks/inner_sync_task.rs index 7552656417..b893b3da89 100644 --- a/sync/src/tasks/inner_sync_task.rs +++ b/sync/src/tasks/inner_sync_task.rs @@ -1,15 +1,22 @@ -use crate::tasks::{ - AccumulatorCollector, BlockAccumulatorSyncTask, BlockCollector, BlockConnectedEventHandle, - BlockFetcher, BlockIdFetcher, BlockSyncTask, PeerOperator, +use crate::{ + block_connector::BlockConnectorService, + tasks::{ + AccumulatorCollector, BlockAccumulatorSyncTask, BlockCollector, BlockConnectedEventHandle, + BlockFetcher, BlockIdFetcher, BlockSyncTask, PeerOperator, + }, }; use anyhow::format_err; use network_api::PeerProvider; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_chain::BlockChain; +use starcoin_config::ChainNetworkID; +use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; +use starcoin_service_registry::ServiceRef; use starcoin_storage::Store; use starcoin_sync_api::SyncTarget; use starcoin_time_service::TimeService; +use starcoin_txpool::TxPoolService; use starcoin_types::block::{BlockIdAndNumber, BlockInfo}; use std::cmp::min; use std::sync::Arc; @@ -32,6 +39,7 @@ where time_service: Arc, peer_provider: N, custom_error_handle: Arc, + net_id: ChainNetworkID, } impl InnerSyncTask @@ -50,6 +58,7 @@ where time_service: Arc, peer_provider: N, custom_error_handle: Arc, + net_id: ChainNetworkID, ) -> Self { Self { ancestor, @@ -61,6 +70,7 @@ where time_service, peer_provider, custom_error_handle, + net_id, } } @@ -81,6 +91,7 @@ where max_retry_times: u64, delay_milliseconds_on_error: u64, skip_pow_verify_when_sync: bool, + block_chain_service: ServiceRef>, vm_metrics: Option, ) -> Result<(BlockChain, TaskHandle), TaskError> { let buffer_size = self.target.peers.len(); @@ -131,15 +142,18 @@ where self.time_service.clone(), ancestor.id, self.storage.clone(), + self.net_id.clone(), vm_metrics, )?; let block_collector = BlockCollector::new_with_handle( current_block_info.clone(), - self.target.clone(), + Some(self.target.clone()), chain, self.block_event_handle.clone(), self.peer_provider.clone(), skip_pow_verify_when_sync, + HashValue::zero(), + None, ); Ok(TaskGenerator::new( block_sync_task, diff --git a/sync/src/tasks/mock.rs b/sync/src/tasks/mock.rs index 5f5c66034d..2a52e3679f 100644 --- a/sync/src/tasks/mock.rs +++ b/sync/src/tasks/mock.rs @@ -4,7 +4,7 @@ use crate::tasks::{ BlockConnectedEvent, BlockFetcher, BlockIdFetcher, BlockInfoFetcher, PeerOperator, SyncFetcher, }; -use anyhow::{format_err, Context, Result}; +use anyhow::{format_err, Context, Ok, Result}; use async_std::task::JoinHandle; use futures::channel::mpsc::UnboundedReceiver; use futures::future::BoxFuture; @@ -14,6 +14,7 @@ use network_api::messages::NotificationMessage; use network_api::{PeerId, PeerInfo, PeerSelector, PeerStrategy}; use network_p2p_core::{NetRpcError, RpcErrorCode}; use rand::Rng; +use starcoin_account_api::AccountInfo; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; @@ -21,8 +22,10 @@ use starcoin_chain_mock::MockChain; use 
starcoin_config::ChainNetwork; use starcoin_crypto::HashValue; use starcoin_network_rpc_api::G_RPC_INFO; +use starcoin_storage::Storage; use starcoin_sync_api::SyncTarget; use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; +use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; use std::time::Duration; @@ -162,6 +165,33 @@ impl SyncNodeMocker { )) } + pub fn new_with_storage( + net: ChainNetwork, + storage: Arc, + chain_info: ChainInfo, + miner: AccountInfo, + delay_milliseconds: u64, + random_error_percent: u32, + ) -> Result { + let chain = MockChain::new_with_storage(net, storage, chain_info.head().id(), miner)?; + let peer_id = PeerId::random(); + let peer_info = PeerInfo::new( + peer_id.clone(), + chain.chain_info(), + NotificationMessage::protocols(), + G_RPC_INFO.clone().into_protocols(), + None, + ); + let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None); + Ok(Self::new_inner( + peer_id, + chain, + ErrorStrategy::Timeout(delay_milliseconds), + random_error_percent, + peer_selector, + )) + } + pub fn new_with_strategy( net: ChainNetwork, error_strategy: ErrorStrategy, @@ -308,12 +338,29 @@ impl BlockFetcher for SyncNodeMocker { fn fetch_blocks( &self, block_ids: Vec, - ) -> BoxFuture<'_, Result)>>> { - let result: Result)>> = block_ids + ) -> BoxFuture< + '_, + Result< + Vec<( + Block, + Option, + Option>, + Option, + )>, + >, + > { + let result: Result< + Vec<( + Block, + Option, + Option>, + Option, + )>, + > = block_ids .into_iter() .map(|block_id| { if let Some(block) = self.chain().get_block(block_id)? { - Ok((block, None)) + Ok((block, None, None, None)) } else { Err(format_err!("Can not find block by id: {}", block_id)) } diff --git a/sync/src/tasks/mod.rs b/sync/src/tasks/mod.rs index 1ed2424924..678722f2f6 100644 --- a/sync/src/tasks/mod.rs +++ b/sync/src/tasks/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::block_connector::BlockConnectorService; use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::inner_sync_task::InnerSyncTask; use crate::verified_rpc_client::{RpcVerifyError, VerifiedRpcClient}; @@ -10,18 +11,24 @@ use futures::future::BoxFuture; use futures::{FutureExt, TryFutureExt}; use network_api::{PeerId, PeerProvider, PeerSelector}; use network_p2p_core::{NetRpcError, RpcErrorCode}; +use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::MerkleAccumulator; use starcoin_chain::{BlockChain, ChainReader}; +use starcoin_config::ChainNetworkID; use starcoin_crypto::HashValue; use starcoin_logger::prelude::*; use starcoin_service_registry::{ActorService, EventHandler, ServiceRef}; use starcoin_storage::Store; use starcoin_sync_api::SyncTarget; use starcoin_time_service::TimeService; +use starcoin_txpool::TxPoolService; +#[cfg(test)] +use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::block::{Block, BlockIdAndNumber, BlockInfo, BlockNumber}; use starcoin_types::startup_info::ChainStatus; use starcoin_types::U256; +use std::result::Result::Ok; use std::str::FromStr; use std::sync::mpsc::Sender; use std::sync::Arc; @@ -32,43 +39,78 @@ use stream_task::{ }; pub trait SyncFetcher: PeerOperator + BlockIdFetcher + BlockFetcher + BlockInfoFetcher { - fn get_best_target(&self, min_difficulty: U256) -> Result> { + fn get_dag_targets(&self, total_difficulty: U256, local_dag_accumulator_leaf_num: u64) -> Result> { + 
Ok(self
+            .peer_selector()
+            .peer_infos()
+            .into_iter()
+            .filter(|peer_info| {
+                match (
+                    peer_info.chain_info().dag_accumulator_info(),
+                    peer_info.chain_info().k_total_difficulties(),
+                ) {
+                    (Some(info), Some(k)) => {
+                        k.first().map_or(false, |&min_k| min_k <= total_difficulty)
+                            || info.get_num_leaves() > local_dag_accumulator_leaf_num
+                    }
+                    (None, None) => false,
+                    _ => {
+                        warn!("dag accumulator info is inconsistent with k total difficulties");
+                        false
+                    }
+                }
+            })
+            .map(|peer_info| peer_info.chain_info().dag_accumulator_info().clone())
+            .collect());
+    }
+
+    fn get_best_target(
+        &self,
+        min_difficulty: U256,
+    ) -> Result<Option<(SyncTarget, Option<AccumulatorInfo>)>> {
         if let Some(best_peers) = self.peer_selector().bests(min_difficulty) {
             //TODO fast verify best peers by accumulator
-            let mut chain_statuses: Vec<(ChainStatus, Vec<PeerId>)> =
+            let mut chain_statuses: Vec<(ChainStatus, Vec<PeerId>, Option<AccumulatorInfo>)> =
                 best_peers
                     .into_iter()
                     .fold(vec![], |mut chain_statuses, peer| {
                         let update = chain_statuses
                             .iter_mut()
-                            .find(|(chain_status, _peers)| {
+                            .find(|(chain_status, _peers, _)| {
                                 peer.chain_info().status() == chain_status
                             })
-                            .map(|(_chain_status, peers)| {
+                            .map(|(_chain_status, peers, _)| {
                                 peers.push(peer.peer_id());
                                 true
                             })
                             .unwrap_or(false);
                         if !update {
-                            chain_statuses
-                                .push((peer.chain_info().status().clone(), vec![peer.peer_id()]))
+                            chain_statuses.push((
+                                peer.chain_info().status().clone(),
+                                vec![peer.peer_id()],
+                                peer.chain_info().dag_accumulator_info().clone(),
+                            ))
                         }
                         chain_statuses
                     });
             //if all best peers block info is same, block_infos len should been 1, other use majority peers block_info
             if chain_statuses.len() > 1 {
-                chain_statuses.sort_by(|(_chain_status_1, peers_1), (_chain_status_2, peers_2)| {
-                    peers_1.len().cmp(&peers_2.len())
-                });
+                chain_statuses.sort_by(
+                    |(_chain_status_1, peers_1, _), (_chain_status_2, peers_2, _)| {
+                        peers_1.len().cmp(&peers_2.len())
+                    },
+                );
             }
-            let (chain_status, peers) = chain_statuses.pop().expect("chain statuses should exist");
+            let (chain_status, peers, dag_accumulator_info) =
+                chain_statuses.pop().expect("chain statuses should exist");
             let header = chain_status.head;
-            Ok(Some(SyncTarget {
-                target_id: BlockIdAndNumber::new(header.id(), header.number()),
-                block_info: chain_status.info,
-                peers,
-            }))
+            Ok(Some((
+                SyncTarget {
+                    target_id: BlockIdAndNumber::new(header.id(), header.number()),
+                    block_info: chain_status.info,
+                    peers,
+                },
+                dag_accumulator_info,
+            )))
         } else {
             debug!(
                 "get_best_target return None, total_peers_in_selector: {}, min_difficulty: {}",
@@ -301,7 +343,7 @@ impl BlockFetcher for VerifiedRpcClient {
     ) -> BoxFuture<'_, Result)>>> {
         self.get_blocks(block_ids.clone())
             .and_then(|blocks| async move {
-                let results: Result<Vec<(Block, Option<PeerId>)>> = block_ids
+                let results = block_ids
                     .iter()
                     .zip(blocks)
                     .map(|(id, block)| {
@@ -372,7 +414,10 @@ impl BlockLocalStore for Arc<dyn Store> {
         Some(block) => {
             let id = block.id();
             let block_info = self.get_block_info(id)?;
-            Ok(Some(SyncBlockData::new(block, block_info, None)))
+
+            Ok(Some(SyncBlockData::new(
+                block, block_info, None, None, 1, None,
+            )))
         }
         None => Ok(None),
     })
@@ -380,11 +425,22 @@
 }
 }
 
+#[derive(Clone, Debug)]
+pub enum BlockConnectAction {
+    ConnectNewBlock,
+    ConnectExecutedBlock,
+}
+
 #[derive(Clone, Debug)]
 pub struct BlockConnectedEvent {
     pub block: Block,
+    pub feedback: Option<UnboundedSender<BlockConnectedFinishEvent>>,
+    pub action: BlockConnectAction,
 }
 
+#[derive(Clone, Debug)]
+pub struct BlockConnectedFinishEvent;
+
 #[derive(Clone, Debug)]
 pub struct BlockDiskCheckEvent {}
 
@@ -392,10 +448,15 @@ pub trait BlockConnectedEventHandle: Send + Clone + 
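
`get_best_target` resolves disagreement between best peers by majority: statuses are grouped, each group collects its supporting peers, and the most-supported status wins. The same rule in isolation, with a string standing in for `ChainStatus`:

```rust
use std::collections::HashMap;

// Majority vote over peer-reported statuses: group peers by status and
// pick the status backed by the most peers (ties resolved arbitrarily).
fn majority_status(reports: &[(u64, &str)]) -> Option<(String, Vec<u64>)> {
    let mut groups: HashMap<&str, Vec<u64>> = HashMap::new();
    for &(peer_id, status) in reports {
        groups.entry(status).or_default().push(peer_id);
    }
    groups
        .into_iter()
        .max_by_key(|(_, peers)| peers.len())
        .map(|(status, peers)| (status.to_string(), peers))
}

fn main() {
    let reports = [(1, "A"), (2, "B"), (3, "B")];
    let (status, peers) = majority_status(&reports).unwrap();
    assert_eq!(status, "B");
    assert_eq!(peers, vec![2, 3]);
}
```
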
std::marker::Unpin { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()>; } -impl BlockConnectedEventHandle for ServiceRef -where - S: ActorService + EventHandler, -{ +impl BlockConnectedEventHandle for ServiceRef> { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.notify(event)?; + Ok(()) + } +} + +#[cfg(test)] +impl BlockConnectedEventHandle for ServiceRef> { fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { self.notify(event)?; Ok(()) @@ -459,6 +520,24 @@ impl BlockConnectedEventHandle for UnboundedSender { } } +#[derive(Debug, Clone)] +pub struct BlockConnectEventHandleMock { + sender: UnboundedSender, +} + +impl BlockConnectEventHandleMock { + pub fn new(sender: UnboundedSender) -> Result { + Ok(Self { sender }) + } +} + +impl BlockConnectedEventHandle for BlockConnectEventHandleMock { + fn handle(&mut self, event: BlockConnectedEvent) -> Result<()> { + self.sender.start_send(event)?; + Ok(()) + } +} + pub struct ExtSyncTaskErrorHandle where F: SyncFetcher + 'static, @@ -508,6 +587,11 @@ mod find_ancestor_task; mod inner_sync_task; #[cfg(test)] pub(crate) mod mock; +mod sync_dag_accumulator_task; +mod sync_dag_block_task; +mod sync_dag_full_task; +mod sync_dag_protocol_trait; +mod sync_find_ancestor_task; #[cfg(test)] mod tests; @@ -516,6 +600,7 @@ pub use accumulator_sync_task::{AccumulatorCollector, BlockAccumulatorSyncTask}; pub use block_sync_task::{BlockCollector, BlockSyncTask}; pub use find_ancestor_task::{AncestorCollector, FindAncestorTask}; use starcoin_executor::VMMetrics; +pub use sync_dag_full_task::sync_dag_full_task; pub fn full_sync_task( current_block_id: HashValue, @@ -528,6 +613,8 @@ pub fn full_sync_task( ancestor_event_handle: A, peer_provider: N, max_retry_times: u64, + block_chain_service: ServiceRef>, + net_id: ChainNetworkID, sync_metrics: Option, vm_metrics: Option, ) -> Result<( @@ -635,6 +722,7 @@ where time_service.clone(), peer_provider.clone(), ext_error_handle.clone(), + net_id.clone(), ); let start_now = Instant::now(); let (block_chain, _) = inner @@ -643,6 +731,7 @@ where max_retry_times, delay_milliseconds_on_error, skip_pow_verify, + block_chain_service.clone(), vm_metrics.clone(), ) .await?; diff --git a/sync/src/tasks/sync_dag_accumulator_task.rs b/sync/src/tasks/sync_dag_accumulator_task.rs new file mode 100644 index 0000000000..b029e4a363 --- /dev/null +++ b/sync/src/tasks/sync_dag_accumulator_task.rs @@ -0,0 +1,169 @@ +use anyhow::{bail, ensure, Chain, Result}; +use bcs_ext::BCSCodec; +use futures::{future::BoxFuture, FutureExt}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_chain::BlockChain; +use starcoin_crypto::HashValue; +use starcoin_logger::prelude::info; +use starcoin_network_rpc_api::dag_protocol::{self, TargetDagAccumulatorLeafDetail}; +use starcoin_storage::{ + flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotStorage}, + storage::CodecKVStore, +}; +use std::sync::Arc; +use stream_task::{CollectorState, TaskResultCollector, TaskState}; + +use crate::verified_rpc_client::VerifiedRpcClient; + +#[derive(Clone)] +pub struct SyncDagAccumulatorTask { + leaf_index: u64, + batch_size: u64, + target_index: u64, + fetcher: Arc, +} +impl SyncDagAccumulatorTask { + pub fn new( + leaf_index: u64, + batch_size: u64, + target_index: u64, + fetcher: Arc, + ) -> Self { + SyncDagAccumulatorTask { + leaf_index, + batch_size, + target_index, + fetcher, + } + } +} + +impl TaskState for SyncDagAccumulatorTask { + type Item = 
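
The handle trait above exists so the sync pipeline can push `BlockConnectedEvent`s without knowing whether the receiver is a registered service or a test channel. A toy version of that seam, using a std mpsc sender in place of `ServiceRef` (`Event` and `EventHandle` are stand-ins, not the crate's types):

```rust
use std::sync::mpsc::{channel, Sender};

// The sync side only needs "something events can be pushed into"; both a
// service reference and a plain channel sender can fill that role.
#[derive(Debug)]
struct Event {
    block_id: u64,
}

trait EventHandle {
    fn handle(&mut self, event: Event) -> Result<(), String>;
}

impl EventHandle for Sender<Event> {
    fn handle(&mut self, event: Event) -> Result<(), String> {
        self.send(event).map_err(|e| e.to_string())
    }
}

fn main() {
    let (tx, rx) = channel();
    let mut handle: Box<dyn EventHandle> = Box::new(tx);
    handle.handle(Event { block_id: 1 }).unwrap();
    assert_eq!(rx.recv().unwrap().block_id, 1);
}
```
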
TargetDagAccumulatorLeafDetail; + + fn new_sub_task(self) -> BoxFuture<'static, Result>> { + async move { + let target_details = match self + .fetcher + .get_accumulator_leaf_detail(dag_protocol::GetTargetDagAccumulatorLeafDetail { + leaf_index: self.leaf_index, + batch_size: self.batch_size, + }) + .await? + { + Some(details) => details, + None => { + bail!("return None when sync accumulator for dag"); + } + }; + Ok(target_details) + } + .boxed() + } + + fn next(&self) -> Option { + //this should never happen, because all node's genesis block should same. + if self.leaf_index == 0 { + // it is genesis + return None; + } + + let next_number = self.leaf_index.saturating_add(self.batch_size); + if next_number > self.target_index - 1 { + // genesis leaf doesn't need synchronization + return None; + } + Some(Self { + fetcher: self.fetcher.clone(), + leaf_index: next_number, + batch_size: self.batch_size, + target_index: self.target_index, + }) + } +} + +pub struct SyncDagAccumulatorCollector { + accumulator: MerkleAccumulator, + accumulator_snapshot: Arc, + target: AccumulatorInfo, + start_leaf_index: u64, +} + +impl SyncDagAccumulatorCollector { + pub fn new( + accumulator: MerkleAccumulator, + accumulator_snapshot: Arc, + target: AccumulatorInfo, + start_leaf_index: u64, + ) -> Self { + Self { + accumulator, + accumulator_snapshot, + target, + start_leaf_index, + } + } +} + +impl TaskResultCollector for SyncDagAccumulatorCollector { + type Output = (u64, MerkleAccumulator); + + fn collect( + &mut self, + mut item: TargetDagAccumulatorLeafDetail, + ) -> anyhow::Result { + let accumulator_leaf = BlockChain::calculate_dag_accumulator_key(item.tips.clone())?; + self.accumulator.append(&[accumulator_leaf])?; + let accumulator_info = self.accumulator.get_info(); + if accumulator_info.accumulator_root != item.accumulator_root { + bail!( + "sync occurs error for the accumulator root differs from other!, local {}, peer {}", + accumulator_info.accumulator_root, + item.accumulator_root + ) + } + self.accumulator.flush()?; + + let num_leaves = accumulator_info.num_leaves; + self.accumulator_snapshot.put( + accumulator_leaf, + SyncFlexiDagSnapshot { + child_hashes: item.tips.clone(), + accumulator_info: accumulator_info.clone(), + }, + )?; + + item.tips.iter().try_fold((), |_, block_id| { + self.accumulator_snapshot.put( + block_id.clone(), + SyncFlexiDagSnapshot { + child_hashes: item.tips.clone(), + accumulator_info: accumulator_info.clone(), + }, + ) + })?; + + if num_leaves == self.target.num_leaves { + Ok(CollectorState::Enough) + } else { + Ok(CollectorState::Need) + } + } + + fn finish(self) -> Result { + let accumulator_info = self.accumulator.get_info(); + + ensure!( + accumulator_info == self.target, + "local accumulator info: {:?}, peer's: {:?}", + accumulator_info, + self.target + ); + info!( + "finish to sync accumulator, its info is: {:?}", + accumulator_info + ); + + Ok((self.start_leaf_index, self.accumulator)) + } +} diff --git a/sync/src/tasks/sync_dag_block_task.rs b/sync/src/tasks/sync_dag_block_task.rs new file mode 100644 index 0000000000..2428898926 --- /dev/null +++ b/sync/src/tasks/sync_dag_block_task.rs @@ -0,0 +1,136 @@ +use crate::{tasks::BlockFetcher, verified_rpc_client::VerifiedRpcClient}; +use anyhow::{Ok, Result}; +use futures::{future::BoxFuture, FutureExt}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator}; +use starcoin_chain::BlockChain; +use starcoin_chain_api::{ChainWriter, ExecutedBlock}; +use 
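
The task above pages through accumulator leaves: each sub-task fetches `batch_size` leaves starting at `leaf_index`, and `next()` stops once the following start would pass the last leaf. The stepping rule on its own (leaf 0 is the genesis leaf and is skipped, as the comments in `next()` note):

```rust
// Forward pagination over accumulator leaves, mirroring
// SyncDagAccumulatorTask::next (with a saturating guard on target_index).
fn next_start(leaf_index: u64, batch_size: u64, target_index: u64) -> Option<u64> {
    let next = leaf_index.saturating_add(batch_size);
    if next > target_index.saturating_sub(1) {
        None // the remaining leaves are covered by the current sub-task
    } else {
        Some(next)
    }
}

fn main() {
    // With 10 target leaves and batch size 3, sub-tasks start at 1, 4, 7,
    // so leaves 1..10 are covered exactly once.
    let mut starts = vec![1u64];
    while let Some(next) = next_start(*starts.last().unwrap(), 3, 10) {
        starts.push(next);
    }
    assert_eq!(starts, vec![1, 4, 7]);
}
```
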
starcoin_logger::prelude::info; +use starcoin_network_rpc_api::dag_protocol::{GetSyncDagBlockInfo, SyncDagBlockInfo}; +use starcoin_storage::{ + block_info, flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore, Store, +}; +use starcoin_types::block::Block; +use std::{collections::HashMap, sync::Arc}; +use stream_task::{CollectorState, TaskResultCollector, TaskState}; + +use super::{block_sync_task::SyncBlockData, BlockLocalStore}; + +#[derive(Clone)] +pub struct SyncDagBlockTask { + accumulator: Arc, + start_index: u64, + target: AccumulatorInfo, + fetcher: Arc, + accumulator_snapshot: Arc, + local_store: Arc, +} +impl SyncDagBlockTask { + pub fn new( + accumulator: MerkleAccumulator, + start_index: u64, + target: AccumulatorInfo, + fetcher: Arc, + accumulator_snapshot: Arc, + local_store: Arc, + ) -> Self { + SyncDagBlockTask { + accumulator: Arc::new(accumulator), + start_index, + target, + fetcher, + accumulator_snapshot: accumulator_snapshot.clone(), + local_store: local_store.clone(), + } + } +} + +impl SyncDagBlockTask { + async fn fetch_absent_dag_block(&self, index: u64) -> Result> { + let leaf = self + .accumulator + .get_leaf(index) + .expect(format!("index: {} must be valid", index).as_str()) + .expect(format!("index: {} should not be None", index).as_str()); + + let snapshot = self + .accumulator_snapshot + .get(leaf) + .expect(format!("index: {} must be valid for getting snapshot", index).as_str()) + .expect(format!("index: {} should not be None for getting snapshot", index).as_str()); + + let mut absent_block = vec![]; + let mut result = vec![]; + snapshot.child_hashes.iter().for_each(|block_id| { + absent_block.push(block_id.clone()); + result.push(SyncDagBlockInfo { + block_id: block_id.clone(), + block: None, + peer_id: None, + }); + }); + + let fetched_block_info = self + .fetcher + .fetch_blocks(absent_block) + .await? 
+ .iter() + .map(|(block, peer_info)| (block.header().id(), (block.clone(), peer_info.clone()))) + .collect::>(); + + // should return the block in order + result.iter_mut().for_each(|block_info| { + block_info.block = Some( + fetched_block_info + .get(&block_info.block_id) + .expect("the block should be got from peer already") + .0 + .to_owned(), + ); + block_info.peer_id = fetched_block_info + .get(&block_info.block_id) + .expect("the block should be got from peer already") + .1 + .to_owned(); + }); + + let block_info = self + .local_store + .get_block_infos(result.iter().map(|item| item.block_id).collect())?; + + Ok(result + .into_iter() + .zip(block_info) + .map(|(item, block_info)| SyncBlockData { + block: item.block.expect("block should exists"), + info: block_info, + peer_id: item.peer_id, + accumulator_root: Some(snapshot.accumulator_info.get_accumulator_root().clone()), + count_in_leaf: snapshot.child_hashes.len() as u64, + dag_accumulator_index: Some(index), + }) + .collect()) + } +} + +impl TaskState for SyncDagBlockTask { + type Item = SyncBlockData; + + fn new_sub_task(self) -> BoxFuture<'static, Result>> { + async move { self.fetch_absent_dag_block(self.start_index).await }.boxed() + } + + fn next(&self) -> Option { + let next_number = self.start_index.saturating_add(1); + if next_number >= self.target.num_leaves { + return None; + } + Some(Self { + accumulator: self.accumulator.clone(), + start_index: next_number, + target: self.target.clone(), + fetcher: self.fetcher.clone(), + accumulator_snapshot: self.accumulator_snapshot.clone(), + local_store: self.local_store.clone(), + }) + } +} diff --git a/sync/src/tasks/sync_dag_full_task.rs b/sync/src/tasks/sync_dag_full_task.rs new file mode 100644 index 0000000000..f13e78275c --- /dev/null +++ b/sync/src/tasks/sync_dag_full_task.rs @@ -0,0 +1,349 @@ +use std::sync::{Arc, Mutex}; + +use anyhow::{anyhow, format_err, Ok}; +use futures::{future::BoxFuture, FutureExt}; +use network_api::PeerProvider; +use starcoin_accumulator::{ + accumulator_info::AccumulatorInfo, Accumulator, AccumulatorTreeStore, MerkleAccumulator, +}; +use starcoin_chain::BlockChain; +use starcoin_chain_api::ChainReader; +use starcoin_config::ChainNetworkID; +use starcoin_consensus::BlockDAG; +use starcoin_crypto::HashValue; +use starcoin_executor::VMMetrics; +use starcoin_logger::prelude::{debug, info}; +use starcoin_network::NetworkServiceRef; +use starcoin_service_registry::ServiceRef; +use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore, Store}; +use starcoin_time_service::TimeService; +use starcoin_txpool::TxPoolService; +use stream_task::{ + Generator, TaskError, TaskEventCounterHandle, TaskFuture, TaskGenerator, TaskHandle, +}; + +use crate::{block_connector::BlockConnectorService, verified_rpc_client::VerifiedRpcClient}; + +use super::{ + sync_dag_accumulator_task::{SyncDagAccumulatorCollector, SyncDagAccumulatorTask}, + sync_dag_block_task::SyncDagBlockTask, + sync_find_ancestor_task::{AncestorCollector, FindAncestorTask}, + BlockCollector, BlockConnectedEventHandle, ExtSyncTaskErrorHandle, +}; + +pub async fn find_dag_ancestor_task( + local_accumulator_info: AccumulatorInfo, + target_accumulator_info: AccumulatorInfo, + fetcher: Arc, + accumulator_store: Arc, + accumulator_snapshot: Arc, + event_handle: Arc, +) -> anyhow::Result { + let max_retry_times = 10; // in startcoin, it is in config + let delay_milliseconds_on_error = 100; + + let ext_error_handle = Arc::new(ExtSyncTaskErrorHandle::new(fetcher.clone())); + + // 
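
`fetch_absent_dag_block` keys the fetched blocks by id precisely because peers may answer out of order; the result is then rebuilt in request order. The same reorder step with `u64` ids and a generic payload:

```rust
use std::collections::HashMap;

// Index the response by id, then rebuild the result in the originally
// requested order; None means a requested block was missing.
fn reorder<T: Clone>(requested: &[u64], fetched: Vec<(u64, T)>) -> Option<Vec<T>> {
    let by_id: HashMap<u64, T> = fetched.into_iter().collect();
    requested.iter().map(|id| by_id.get(id).cloned()).collect()
}

fn main() {
    let requested = [10, 20, 30];
    let fetched = vec![(30, "c"), (10, "a"), (20, "b")];
    assert_eq!(reorder(&requested, fetched), Some(vec!["a", "b", "c"]));
}
```
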
here should compare the dag's node not accumulator leaf node + let sync_task = TaskGenerator::new( + FindAncestorTask::new( + local_accumulator_info.num_leaves - 1, + target_accumulator_info.num_leaves, + fetcher, + ), + 2, + max_retry_times, + delay_milliseconds_on_error, + AncestorCollector::new( + Arc::new(MerkleAccumulator::new_with_info( + local_accumulator_info, + accumulator_store.clone(), + )), + accumulator_snapshot.clone(), + ), + event_handle.clone(), + ext_error_handle.clone(), + ) + .generate(); + let (fut, _handle) = sync_task.with_handle(); + match fut.await { + anyhow::Result::Ok(ancestor) => { + return Ok(ancestor); + } + Err(error) => { + return Err(anyhow!(error)); + } + } +} + +async fn sync_accumulator( + local_accumulator_info: AccumulatorInfo, + target_accumulator_info: AccumulatorInfo, + fetcher: Arc, + accumulator_store: Arc, + accumulator_snapshot: Arc, +) -> anyhow::Result<(u64, MerkleAccumulator)> { + let max_retry_times = 10; // in startcoin, it is in config + let delay_milliseconds_on_error = 100; + + let start_index = local_accumulator_info.get_num_leaves().saturating_sub(1); + + let event_handle = Arc::new(TaskEventCounterHandle::new()); + + let ext_error_handle = Arc::new(ExtSyncTaskErrorHandle::new(fetcher.clone())); + + let sync_task = TaskGenerator::new( + SyncDagAccumulatorTask::new( + start_index.saturating_add(1), + 3, + target_accumulator_info.num_leaves, + fetcher.clone(), + ), + 2, + max_retry_times, + delay_milliseconds_on_error, + SyncDagAccumulatorCollector::new( + MerkleAccumulator::new_with_info(local_accumulator_info, accumulator_store.clone()), + accumulator_snapshot.clone(), + target_accumulator_info, + start_index, + ), + event_handle.clone(), + ext_error_handle, + ) + .generate(); + let (fut, handle) = sync_task.with_handle(); + match fut.await { + anyhow::Result::Ok((start_index, full_accumulator)) => { + return anyhow::Result::Ok((start_index, full_accumulator)); + } + Err(error) => { + return Err(anyhow!(error)); + } + } + + // TODO: we need to talk about this + // .and_then(|sync_accumulator_result, event_handle| { + // let sync_dag_accumulator_task = TaskGenerator::new( + // SyncDagBlockTask::new(), + // 2, + // max_retry_times, + // delay_milliseconds_on_error, + // SyncDagAccumulatorCollector::new(), + // event_handle.clone(), + // ext_error_handle, + // ); + // Ok(sync_dag_accumulator_task) + // }); + // return Ok(async_std::task::block_on(sync)); + // match async_std::task::block_on(sync) { + // std::result::Result::Ok((index, accumulator)) => { + // debug!("sync accumulator success, target accumulator info's leaf count = {}, root hash = {}, begin index = {}", + // accumulator.get_info().get_num_leaves(), accumulator.get_info().get_accumulator_root(), index); + // return Ok((index, accumulator)); + // } + // Err(error) => { + // println!("sync accumulator error: {}", error.to_string()); + // Err(error.into()) + // } + // } +} + +fn get_start_block_id( + accumulator: &MerkleAccumulator, + start_index: u64, + local_store: Arc, +) -> anyhow::Result { + let last_block_id = accumulator + .get_leaf(start_index)? + .expect("last block id should not be None"); + + let mut snapshot = local_store + .query_by_hash(last_block_id)? 
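
Both stages above follow the same shape: build a task generator, await its future, and feed the result into the next stage, so the overall DAG sync is sequential awaits with each stage's output driving the next. A toy two-stage pipeline showing that data flow (`find_ancestor` and `sync_from` are placeholders; `block_on` comes from the futures crate):

```rust
// Stage 1 finds the common ancestor; stage 2 syncs forward from it.
async fn find_ancestor() -> Result<u64, String> {
    Ok(5) // pretend leaf index of the common ancestor
}

async fn sync_from(ancestor: u64, target: u64) -> Result<u64, String> {
    Ok(target - ancestor) // pretend number of leaves synced
}

fn main() {
    let synced = futures::executor::block_on(async {
        let ancestor = find_ancestor().await?;
        sync_from(ancestor, 12).await
    });
    assert_eq!(synced, Ok(7));
}
```
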
+ .expect("tips should not be None"); + snapshot.child_hashes.sort(); + Ok(snapshot + .child_hashes + .iter() + .last() + .expect("last block id should not be None") + .clone()) +} + +async fn sync_dag_block2( + start_index: u64, + accumulator: MerkleAccumulator, +) -> anyhow::Result { +} + +async fn sync_dag_block( + start_index: u64, + accumulator: MerkleAccumulator, + fetcher: Arc, + accumulator_snapshot: Arc, + local_store: Arc, + time_service: Arc, + block_event_handle: H, + network: N, + skip_pow_verify_when_sync: bool, + dag: Arc>, + block_chain_service: ServiceRef>, + net_id: ChainNetworkID, + vm_metrics: Option, +) -> anyhow::Result +where + H: BlockConnectedEventHandle + Sync + 'static, + N: PeerProvider + Clone + 'static, +{ + let max_retry_times = 10; // in startcoin, it is in config + let delay_milliseconds_on_error = 100; + let event_handle = Arc::new(TaskEventCounterHandle::new()); + let ext_error_handle = Arc::new(ExtSyncTaskErrorHandle::new(fetcher.clone())); + + // let start_block_id = get_start_block_id(&accumulator, start_index, local_store.clone()) + // .map_err(|err| TaskError::BreakError(anyhow!(err))); + // let chain = BlockChain::new( + // time_service.clone(), + // start_block_id?, + // local_store.clone(), + // net_id, + // vm_metrics, + // ) + // .map_err(|err| TaskError::BreakError(anyhow!(err))); + + let leaf = accumulator + .get_leaf(start_index) + .expect(format!("index: {} must be valid", start_index).as_str()) + .expect(format!("index: {} should not be None", start_index).as_str()); + + let mut snapshot = accumulator_snapshot + .get(leaf) + .expect(format!("index: {} must be valid for getting snapshot", start_index).as_str()) + .expect( + format!( + "index: {} should not be None for getting snapshot", + start_index + ) + .as_str(), + ); + + let chain = BlockChain::new( + time_service.clone(), + snapshot.head_block_id?, + local_store.clone(), + net_id, + vm_metrics, + )?; + + let current_block_info = local_store + .get_block_info(snapshot.head_block_id)? 
+        .ok_or_else(|| format_err!("Can not find block info by id: {}", snapshot.head_block_id))
+        .map_err(|err| TaskError::BreakError(anyhow!(err)));
+
+    let accumulator_info = accumulator.get_info();
+    let accumulator_root = accumulator.root_hash();
+    let sync_task = TaskGenerator::new(
+        SyncDagBlockTask::new(
+            accumulator,
+            start_index.saturating_add(1),
+            accumulator_info,
+            fetcher.clone(),
+            accumulator_snapshot.clone(),
+            local_store.clone(),
+        ),
+        2,
+        max_retry_times,
+        delay_milliseconds_on_error,
+        BlockCollector::new_with_handle(
+            current_block_info?.clone(),
+            None,
+            chain,
+            block_event_handle.clone(),
+            network.clone(),
+            skip_pow_verify_when_sync,
+            accumulator_root,
+            Some(dag.clone()),
+        ),
+        event_handle.clone(),
+        ext_error_handle,
+    )
+    .generate();
+    let (fut, handle) = sync_task.with_handle();
+    match fut.await {
+        anyhow::Result::Ok(block_chain) => {
+            return anyhow::Result::Ok(block_chain);
+        }
+        Err(error) => {
+            return Err(anyhow!(error));
+        }
+    };
+}
+
+pub fn sync_dag_full_task(
+    local_accumulator_info: AccumulatorInfo,
+    target_accumulator_info: AccumulatorInfo,
+    fetcher: Arc<VerifiedRpcClient>,
+    accumulator_store: Arc<dyn AccumulatorTreeStore>,
+    accumulator_snapshot: Arc<SyncFlexiDagSnapshotStorage>,
+    local_store: Arc<dyn Store>,
+    time_service: Arc<dyn TimeService>,
+    vm_metrics: Option<VMMetrics>,
+    connector_service: ServiceRef<BlockConnectorService<TxPoolService>>,
+    network: NetworkServiceRef,
+    skip_pow_verify_when_sync: bool,
+    dag: Arc<Mutex<BlockDAG>>,
+    block_chain_service: ServiceRef<BlockConnectorService<TxPoolService>>,
+    net_id: ChainNetworkID,
+) -> anyhow::Result<(
+    BoxFuture<'static, anyhow::Result<BlockChain>>,
+    TaskHandle,
+    Arc<TaskEventCounterHandle>,
+)> {
+    let event_handle = Arc::new(TaskEventCounterHandle::new());
+    let task_event_handle = event_handle.clone();
+    let all_fut = async move {
+        let ancestor = find_dag_ancestor_task(
+            local_accumulator_info.clone(),
+            target_accumulator_info.clone(),
+            fetcher.clone(),
+            accumulator_store.clone(),
+            accumulator_snapshot.clone(),
+            task_event_handle.clone(),
+        )
+        .await
+        .map_err(|err| TaskError::BreakError(anyhow!(err)))?;
+
+        let (start_index, accumulator) = sync_accumulator(
+            ancestor,
+            target_accumulator_info,
+            fetcher.clone(),
+            accumulator_store.clone(),
+            accumulator_snapshot.clone(),
+        )
+        .await
+        .map_err(|err| TaskError::BreakError(anyhow!(err)))?;
+
+        let block_chain = sync_dag_block(
+            start_index,
+            accumulator,
+            fetcher.clone(),
+            accumulator_snapshot.clone(),
+            local_store.clone(),
+            time_service.clone(),
+            connector_service.clone(),
+            network,
+            skip_pow_verify_when_sync,
+            dag.clone(),
+            block_chain_service.clone(),
+            net_id,
+            vm_metrics,
+        )
+        .await
+        .map_err(|err| TaskError::BreakError(anyhow!(err)))?;
+        return anyhow::Result::Ok(block_chain);
+    };
+
+    let task = TaskFuture::new(all_fut.boxed());
+    let (fut, handle) = task.with_handle();
+    Ok((fut, handle, event_handle))
+}
diff --git a/sync/src/tasks/sync_dag_protocol_trait.rs b/sync/src/tasks/sync_dag_protocol_trait.rs
new file mode 100644
index 0000000000..78b2093c7a
--- /dev/null
+++ b/sync/src/tasks/sync_dag_protocol_trait.rs
@@ -0,0 +1,29 @@
+use anyhow::Result;
+use futures::future::BoxFuture;
+use network_p2p_core::PeerId;
+use starcoin_network_rpc_api::dag_protocol::{
+    SyncDagBlockInfo, TargetDagAccumulatorLeaf, TargetDagAccumulatorLeafDetail,
+};
+
+pub trait PeerSyncDagAccumulator: Send + Sync {
+    fn get_sync_dag_accumulator_leaves(
+        &self,
+        peer_id: Option<PeerId>,
+        leaf_index: u64,
+        batch_size: u64,
+    ) -> BoxFuture<Result<Vec<TargetDagAccumulatorLeaf>>>;
+
+    fn get_accumulator_leaf_detail(
+        &self,
+        peer_id: Option<PeerId>,
+        leaf_index: u64,
+        batch_size: u64,
+    ) -> BoxFuture<Result<Option<Vec<TargetDagAccumulatorLeafDetail>>>>;
+
+    fn get_dag_block_info(
+        &self,
+        peer: Option<PeerId>,
+        leaf_index: u64,
+        batch_size: u64,
+    ) -> BoxFuture<Result<Option<Vec<SyncDagBlockInfo>>>>;
+}
diff --git a/sync/src/tasks/sync_find_ancestor_task.rs b/sync/src/tasks/sync_find_ancestor_task.rs
new file mode 100644
index 0000000000..5206c2ef0c
--- /dev/null
+++ b/sync/src/tasks/sync_find_ancestor_task.rs
@@ -0,0 +1,115 @@
+use anyhow::{format_err, Result};
+use futures::{future::BoxFuture, FutureExt};
+use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator};
+use starcoin_network_rpc_api::dag_protocol::{self, TargetDagAccumulatorLeaf};
+use starcoin_storage::{flexi_dag::SyncFlexiDagSnapshotStorage, storage::CodecKVStore};
+use std::sync::Arc;
+use stream_task::{CollectorState, TaskResultCollector, TaskState};
+
+use crate::verified_rpc_client::VerifiedRpcClient;
+
+#[derive(Clone)]
+pub struct FindAncestorTask {
+    start_leaf_number: u64,
+    fetcher: Arc<VerifiedRpcClient>,
+    batch_size: u64,
+}
+impl FindAncestorTask {
+    pub(crate) fn new(
+        current_leaf_number: u64,
+        target_leaf_number: u64,
+        fetcher: Arc<VerifiedRpcClient>,
+    ) -> Self {
+        FindAncestorTask {
+            start_leaf_number: std::cmp::min(current_leaf_number, target_leaf_number),
+            fetcher,
+            batch_size: 3,
+        }
+    }
+}
+
+impl TaskState for FindAncestorTask {
+    type Item = TargetDagAccumulatorLeaf;
+
+    fn new_sub_task(self) -> BoxFuture<'static, Result<Vec<Self::Item>>> {
+        async move {
+            let target_accumulator_leaves = self
+                .fetcher
+                .get_dag_accumulator_leaves(dag_protocol::GetDagAccumulatorLeaves {
+                    accumulator_leaf_index: self.start_leaf_number,
+                    batch_size: self.batch_size,
+                })
+                .await?;
+            Ok(target_accumulator_leaves)
+        }
+        .boxed()
+    }
+
+    fn next(&self) -> Option<Self> {
+        // this should never happen, because every node's genesis block should be the same.
+        if self.start_leaf_number == 0 {
+            return None;
+        }
+
+        let next_number = self.start_leaf_number.saturating_sub(self.batch_size);
+        Some(Self {
+            start_leaf_number: next_number,
+            batch_size: self.batch_size,
+            fetcher: self.fetcher.clone(),
+        })
+    }
+}
+
+pub struct AncestorCollector {
+    accumulator: Arc<MerkleAccumulator>,
+    ancestor: Option<AccumulatorInfo>,
+    accumulator_snapshot: Arc<SyncFlexiDagSnapshotStorage>,
+}
+
+impl AncestorCollector {
+    pub fn new(
+        accumulator: Arc<MerkleAccumulator>,
+        accumulator_snapshot: Arc<SyncFlexiDagSnapshotStorage>,
+    ) -> Self {
+        Self {
+            accumulator,
+            ancestor: None,
+            accumulator_snapshot,
+        }
+    }
+}
+
+impl TaskResultCollector<TargetDagAccumulatorLeaf> for AncestorCollector {
+    type Output = AccumulatorInfo;
+
+    fn collect(&mut self, item: TargetDagAccumulatorLeaf) -> anyhow::Result<CollectorState> {
+        if self.ancestor.is_some() {
+            return Ok(CollectorState::Enough);
+        }
+
+        let accumulator_leaf = self.accumulator.get_leaf(item.leaf_index)?.ok_or_else(|| {
+            format_err!(
+                "Cannot find accumulator leaf by number: {}",
+                item.leaf_index
+            )
+        })?;
+
+        let accumulator_info = match self.accumulator_snapshot.get(accumulator_leaf)?
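
`FindAncestorTask` walks the local accumulator backward from `min(local, target)` in `batch_size` steps until a leaf whose root matches the peer's is found; leaf 0 is the genesis leaf, which always matches. The backward stepping by itself:

```rust
// Backward scan over leaf indices, mirroring FindAncestorTask::next.
fn scan_starts(start_leaf_number: u64, batch_size: u64) -> Vec<u64> {
    let mut starts = vec![start_leaf_number];
    let mut cur = start_leaf_number;
    while cur != 0 {
        cur = cur.saturating_sub(batch_size);
        starts.push(cur);
    }
    starts
}

fn main() {
    // From leaf 10 with batch size 3: 10, 7, 4, 1, 0 (genesis ends the scan).
    assert_eq!(scan_starts(10, 3), vec![10, 7, 4, 1, 0]);
}
```
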
{ + Some(snapshot) => snapshot.accumulator_info, + None => panic!("failed to get the snapshot, it is none."), + }; + + if item.accumulator_root == accumulator_info.accumulator_root { + self.ancestor = Some(accumulator_info); + return anyhow::Result::Ok(CollectorState::Enough); + } else { + Ok(CollectorState::Need) + } + } + + fn finish(mut self) -> Result { + self.ancestor + .take() + .ok_or_else(|| format_err!("Unexpect state, collector finished by ancestor is None")) + } +} diff --git a/sync/src/tasks/tests.rs b/sync/src/tasks/tests.rs index 06206f227e..88895a69c4 100644 --- a/sync/src/tasks/tests.rs +++ b/sync/src/tasks/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::integer_arithmetic)] +use crate::block_connector::{BlockConnectorService, CheckBlockConnectorHashValue}; use crate::tasks::block_sync_task::SyncBlockData; use crate::tasks::mock::{ErrorStrategy, MockBlockIdFetcher, SyncNodeMocker}; use crate::tasks::{ @@ -11,35 +12,43 @@ use crate::tasks::{ use crate::verified_rpc_client::RpcVerifyError; use anyhow::Context; use anyhow::{format_err, Result}; +use anyhow::{Context, Ok}; use futures::channel::mpsc::unbounded; use futures::future::BoxFuture; use futures::FutureExt; use futures_timer::Delay; use network_api::{PeerId, PeerInfo, PeerSelector, PeerStrategy}; use pin_utils::core_reexport::time::Duration; +use starcoin_account_api::AccountInfo; use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::tree_store::mock::MockAccumulatorStore; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_chain::BlockChain; use starcoin_chain_api::ChainReader; use starcoin_chain_mock::MockChain; -use starcoin_config::{BuiltinNetworkID, ChainNetwork}; +use starcoin_config::{BuiltinNetworkID, ChainNetwork, NodeConfig}; use starcoin_crypto::HashValue; use starcoin_genesis::Genesis; +use starcoin_genesis::Genesis as StarcoinGenesis; use starcoin_logger::prelude::*; -use starcoin_storage::BlockStore; +use starcoin_service_registry::{RegistryAsyncService, RegistryService, ServiceRef}; +use starcoin_storage::{BlockStore, Storage}; use starcoin_sync_api::SyncTarget; +use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::{ block::{Block, BlockBody, BlockHeaderBuilder, BlockIdAndNumber, BlockInfo}, U256, }; use std::collections::HashMap; use std::sync::{Arc, Mutex}; +use stest::actix_export::System; use stream_task::{ DefaultCustomErrorHandle, Generator, TaskError, TaskEventCounterHandle, TaskGenerator, }; use test_helper::DummyNetworkService; +use super::BlockConnectedEvent; + #[stest::test(timeout = 120)] pub async fn test_full_sync_new_node() -> Result<()> { let net1 = ChainNetwork::new_builtin(BuiltinNetworkID::Test); @@ -180,6 +189,7 @@ pub async fn test_failed_block() -> Result<()> { net.time_service(), chain_info.head().id(), storage.clone(), + net.id().clone(), None, )?; let (sender, _) = unbounded(); @@ -191,17 +201,22 @@ pub async fn test_failed_block() -> Result<()> { }; let mut block_collector = BlockCollector::new_with_handle( chain_info.status().info.clone(), - target, + Some(target), chain, sender, DummyNetworkService::default(), true, + HashValue::zero(), + None, ); let header = BlockHeaderBuilder::random().with_number(1).build(); let body = BlockBody::new(Vec::new(), None); let failed_block = Block::new(header, body); let failed_block_id = failed_block.id(); - if block_collector.apply_block_for_test(failed_block).is_err() { + if block_collector + .apply_block_for_test(failed_block, None, 
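
A collector drives the whole stream task: returning `Need` asks for more items, `Enough` stops the task, and `finish()` is where a never-found ancestor turns into an error. The control flow in miniature (`u64` stands in for the accumulator root comparison):

```rust
// Stop-on-first-match, the same shape as AncestorCollector::collect/finish.
enum CollectorState {
    Need,
    Enough,
}

fn drive(items: impl IntoIterator<Item = u64>, wanted: u64) -> Option<u64> {
    let mut found = None;
    for item in items {
        let state = if item == wanted {
            found = Some(item);
            CollectorState::Enough
        } else {
            CollectorState::Need
        };
        if matches!(state, CollectorState::Enough) {
            break;
        }
    }
    found // like `finish`: None here would be an error upstream
}

fn main() {
    assert_eq!(drive([9, 7, 5, 3], 5), Some(5));
    assert_eq!(drive([9, 7], 5), None);
}
```
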
&mut None) + .is_err() + { assert!(storage.get_failed_block_by_id(failed_block_id)?.is_some()); Ok(()) } else { @@ -680,13 +695,29 @@ impl BlockFetcher for MockBlockFetcher { fn fetch_blocks( &self, block_ids: Vec, - ) -> BoxFuture)>>> { + ) -> BoxFuture< + Result< + Vec<( + Block, + Option, + Option>, + Option, + )>, + >, + > { let blocks = self.blocks.lock().unwrap(); - let result: Result)>> = block_ids + let result: Result< + Vec<( + Block, + Option, + Option>, + Option, + )>, + > = block_ids .iter() .map(|block_id| { if let Some(block) = blocks.get(block_id).cloned() { - Ok((block, None)) + Ok((block, None, None, None)) } else { Err(format_err!("Can not find block by id: {:?}", block_id)) } @@ -735,7 +766,7 @@ impl MockLocalBlockStore { ); self.store.lock().unwrap().insert( block.id(), - SyncBlockData::new(block.clone(), Some(block_info), None), + SyncBlockData::new(block.clone(), Some(block_info), None, None, 1, None), ); } } @@ -977,10 +1008,247 @@ async fn test_sync_target() { .unwrap() .unwrap(); let target = node2 - .get_better_target(genesis_chain_info.total_difficulty(), full_target, 10, 0) + .get_better_target(genesis_chain_info.total_difficulty(), full_target.0, 10, 0) .await .unwrap(); assert_eq!(target.peers.len(), 2); assert_eq!(target.target_id.number(), low_chain_info.head().number()); assert_eq!(target.target_id.id(), low_chain_info.head().id()); } + +fn sync_block_in_async_connection( + mut target_node: Arc, + local_node: Arc, + storage: Arc, + block_count: u64, +) -> Result> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + let target = target_node.sync_target(); + let target_id = target.target_id.id(); + + let (sender, mut receiver) = futures::channel::mpsc::unbounded::(); + let thread_local_node = local_node.clone(); + + let process_block = move || { + let mut chain = MockChain::new_with_storage( + thread_local_node.chain_mocker.net().clone(), + storage.clone(), + thread_local_node.chain_mocker.head().status().head.id(), + thread_local_node.chain_mocker.miner().clone(), + ) + .unwrap(); + loop { + if let std::result::Result::Ok(result) = receiver.try_next() { + match result { + Some(event) => { + chain + .select_head(event.block) + .expect("select head must be successful"); + if event.feedback.is_some() { + event + .feedback + .unwrap() + .unbounded_send(super::BlockConnectedFinishEvent) + .unwrap(); + assert_eq!(target_id, chain.head().status().head.id()); + break; + } + } + None => break, + } + } + } + }; + let handle = std::thread::spawn(process_block); + + let current_block_header = local_node.chain().current_header(); + let storage = local_node.chain().get_storage(); + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_header.id(), + target.clone(), + false, + local_net.time_service(), + storage.clone(), + sender, + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + None, + )?; + let branch = async_std::task::block_on(sync_task)?; + assert_eq!(branch.current_header().id(), target.target_id.id()); + + handle.join().unwrap(); + + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + + Ok(target_node) +} + +#[stest::test] +async fn test_sync_block_in_async_connection() -> Result<()> { + let net = ChainNetwork::new_builtin(BuiltinNetworkID::Test); + let mut 
target_node = Arc::new(SyncNodeMocker::new(net.clone(), 1, 0)?); + + let (storage, chain_info, _) = + Genesis::init_storage_for_test(&net).expect("init storage by genesis fail."); + let local_node = Arc::new(SyncNodeMocker::new_with_storage( + net, + storage.clone(), + chain_info, + AccountInfo::random(), + 1, + 0, + )?); + + target_node = + sync_block_in_async_connection(target_node, local_node.clone(), storage.clone(), 10)?; + _ = sync_block_in_async_connection(target_node, local_node, storage, 20)?; + + Ok(()) +} + +fn sync_block_in_block_connection_service_mock( + mut target_node: Arc, + local_node: Arc, + registry: &ServiceRef, + block_count: u64, +) -> Result> { + Arc::get_mut(&mut target_node) + .unwrap() + .produce_block(block_count)?; + loop { + let target = target_node.sync_target(); + + let storage = local_node.chain().get_storage(); + let startup_info = storage + .get_startup_info()? + .ok_or_else(|| format_err!("Startup info should exist."))?; + let current_block_id = startup_info.main; + + let local_net = local_node.chain_mocker.net(); + let (local_ancestor_sender, _local_ancestor_receiver) = unbounded(); + + let (sync_task, _task_handle, task_event_counter) = full_sync_task( + current_block_id, + target.clone(), + false, + local_net.time_service(), + storage.clone(), + async_std::task::block_on( + registry.service_ref::>(), + )? + .clone(), + target_node.clone(), + local_ancestor_sender, + DummyNetworkService::default(), + 15, + None, + None, + None, + )?; + let branch = async_std::task::block_on(sync_task)?; + info!("checking branch in sync service is the same as target's branch"); + assert_eq!(branch.current_header().id(), target.target_id.id()); + + let block_connector_service = async_std::task::block_on( + registry.service_ref::>(), + )? 
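
The test above applies blocks on a dedicated thread and uses the event's feedback sender as a completion handshake: only the final event carries a channel, and the producer blocks until the consumer acknowledges it. The same handshake with plain std channels (`Event` is a stand-in for `BlockConnectedEvent`):

```rust
use std::sync::mpsc::channel;
use std::thread;

struct Event {
    block_id: u64,
    feedback: Option<std::sync::mpsc::Sender<()>>, // ack channel on the last event
}

fn main() {
    let (tx, rx) = channel::<Event>();
    let consumer = thread::spawn(move || {
        let mut last = 0;
        while let Ok(event) = rx.recv() {
            last = event.block_id; // stand-in for chain.select_head(block)
            if let Some(ack) = event.feedback {
                ack.send(()).unwrap();
                break; // final block: acknowledge and stop
            }
        }
        last
    });

    let (ack_tx, ack_rx) = channel();
    tx.send(Event { block_id: 1, feedback: None }).unwrap();
    tx.send(Event { block_id: 2, feedback: Some(ack_tx) }).unwrap();
    ack_rx.recv().unwrap(); // producer blocks until the consumer caught up
    assert_eq!(consumer.join().unwrap(), 2);
}
```
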
+ .clone(); + let result = async_std::task::block_on(block_connector_service.send( + CheckBlockConnectorHashValue { + head_hash: target.target_id.id(), + }, + ))?; + if result.is_ok() { + break; + } + let reports = task_event_counter.get_reports(); + reports + .iter() + .for_each(|report| debug!("reports: {}", report)); + } + + Ok(target_node) +} + +#[stest::test] +async fn test_sync_block_apply_failed_but_connect_success() -> Result<()> { + let config = Arc::new(NodeConfig::random_for_test()); + let (storage, chain_info, _) = StarcoinGenesis::init_storage_for_test(config.net()) + .expect("init storage by genesis fail."); + + let target_node = Arc::new(SyncNodeMocker::new(config.net().clone(), 1, 0)?); + let local_node = Arc::new(SyncNodeMocker::new_with_storage( + config.net().clone(), + storage.clone(), + chain_info.clone(), + AccountInfo::random(), + 1, + 0, + )?); + + let (registry_sender, registry_receiver) = async_std::channel::unbounded(); + + info!( + "in test_sync_block_apply_failed_but_connect_success, start tokio runtime for main thread" + ); + + let _handle = timeout_join_handler::spawn(move || { + let system = System::with_tokio_rt(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .on_thread_stop(|| debug!("main thread stopped")) + .thread_name("main") + .build() + .expect("failed to create tokio runtime for main") + }); + async_std::task::block_on(async { + let registry = RegistryService::launch(); + + registry.put_shared(config.clone()).await.unwrap(); + registry.put_shared(storage.clone()).await.unwrap(); + registry.put_shared(MockTxPoolService::new()).await.unwrap(); + + Delay::new(Duration::from_secs(2)).await; + + registry + .register::>() + .await + .unwrap(); + + registry_sender.send(registry).await.unwrap(); + }); + + system.run().unwrap(); + }); + + let registry = registry_receiver.recv().await.unwrap(); + + let target_node = sync_block_in_block_connection_service_mock( + target_node, + local_node.clone(), + ®istry, + 10, + )?; + _ = sync_block_in_block_connection_service_mock( + target_node, + local_node.clone(), + ®istry, + 20, + )?; + + Ok(()) +} diff --git a/sync/src/verified_rpc_client.rs b/sync/src/verified_rpc_client.rs index fc4bc6f8f5..265fdba26c 100644 --- a/sync/src/verified_rpc_client.rs +++ b/sync/src/verified_rpc_client.rs @@ -10,6 +10,7 @@ use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::AccumulatorNode; use starcoin_crypto::hash::HashValue; use starcoin_logger::prelude::*; +use starcoin_network_rpc_api::dag_protocol; use starcoin_network_rpc_api::{ gen_client::NetworkRpcClient, BlockBody, GetAccumulatorNodeByNodeHash, GetBlockHeadersByNumber, GetBlockIds, GetTxnsWithHash, RawRpcClient, @@ -383,8 +384,7 @@ impl VerifiedRpcClient { ) -> Result)>>> { let peer_id = self.select_a_peer()?; let start_time = Instant::now(); - let blocks: Vec> = - self.client.get_blocks(peer_id.clone(), ids.clone()).await?; + let blocks = self.client.get_blocks(peer_id.clone(), ids.clone()).await?; let time = (Instant::now() .saturating_duration_since(start_time) .as_millis()) as u32; @@ -395,7 +395,7 @@ impl VerifiedRpcClient { .zip(blocks) .map(|(id, block)| { if let Some(block) = block { - let actual_id = block.id(); + let actual_id = block.0.id(); if actual_id != id { warn!( "Get block by id: {:?} from peer: {:?}, but got block: {:?}", @@ -403,7 +403,7 @@ impl VerifiedRpcClient { ); None } else { - Some((block, Some(peer_id.clone()))) + Some((block.0, Some(peer_id.clone()))) } } else { None @@ -411,4 +411,36 @@ impl 
VerifiedRpcClient { }) .collect()) } + + pub async fn get_dag_accumulator_leaves( + &self, + req: dag_protocol::GetDagAccumulatorLeaves, + ) -> Result> { + let peer_id = self.select_a_peer()?; + self.client.get_dag_accumulator_leaves(peer_id, req).await + } + + pub async fn get_accumulator_leaf_detail( + &self, + req: dag_protocol::GetTargetDagAccumulatorLeafDetail, + ) -> Result>> { + let peer_id = self.select_a_peer()?; + match self.client.get_accumulator_leaf_detail(peer_id, req).await { + Ok(result) => Ok(result), + Err(error) => { + warn!( + "get_accumulator_leaf_detail return None, error: {}", + error.to_string() + ); + Ok(None) + } + } + } + + pub async fn get_dag_block_info( + &self, + _req: dag_protocol::GetSyncDagBlockInfo, + ) -> Result>> { + todo!() + } } diff --git a/test-helper/src/chain.rs b/test-helper/src/chain.rs index ba337c327b..d00457db33 100644 --- a/test-helper/src/chain.rs +++ b/test-helper/src/chain.rs @@ -13,7 +13,13 @@ pub fn gen_blockchain_for_test(net: &ChainNetwork) -> Result { let (storage, chain_info, _) = Genesis::init_storage_for_test(net).expect("init storage by genesis fail."); - let block_chain = BlockChain::new(net.time_service(), chain_info.head().id(), storage, None)?; + let block_chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage, + net.id().clone(), + None, + )?; Ok(block_chain) } @@ -27,7 +33,7 @@ pub fn gen_blockchain_with_blocks_for_test(count: u64, net: &ChainNetwork) -> Re let block = block_chain .consensus() .create_block(block_template, net.time_service().as_ref())?; - block_chain.apply(block)?; + block_chain.apply(block, None)?; } Ok(block_chain) diff --git a/test-helper/src/network.rs b/test-helper/src/network.rs index 2e5faea961..d8b5c0cf88 100644 --- a/test-helper/src/network.rs +++ b/test-helper/src/network.rs @@ -16,7 +16,7 @@ use starcoin_service_registry::{ RegistryAsyncService, RegistryService, ServiceContext, ServiceFactory, ServiceRef, }; use starcoin_storage::block_info::BlockInfoStore; -use starcoin_storage::{BlockStore, Storage}; +use starcoin_storage::{BlockStore, Storage, SyncFlexiDagStore}; use starcoin_types::startup_info::{ChainInfo, ChainStatus}; use std::any::Any; use std::borrow::Cow; @@ -184,7 +184,6 @@ impl ServiceFactory for MockNetworkServiceFactory { let peer_message_handle = MockPeerMessageHandler::default(); let config = ctx.get_shared::>()?; let storage = ctx.get_shared::>()?; - let genesis_hash = genesis.block().header().id(); let startup_info = storage.get_startup_info()?.unwrap(); let head_block_hash = startup_info.main; @@ -194,15 +193,25 @@ impl ServiceFactory for MockNetworkServiceFactory { let head_block_info = storage .get_block_info(head_block_hash)? 
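
`get_accumulator_leaf_detail` above deliberately maps an RPC failure to `Ok(None)`: a missing detail is recoverable because another peer can be asked, so the error is only logged. The shape of that policy, with placeholder fetch functions:

```rust
// Downgrade a transport error to "no data", keeping the outer Result for
// genuinely fatal failures.
fn fetch_detail(fail: bool) -> Result<u64, String> {
    if fail { Err("peer timeout".to_string()) } else { Ok(42) }
}

fn fetch_detail_lenient(fail: bool) -> Result<Option<u64>, String> {
    match fetch_detail(fail) {
        Ok(v) => Ok(Some(v)),
        Err(e) => {
            eprintln!("get_accumulator_leaf_detail returned None, error: {}", e);
            Ok(None) // caller treats None as "retry elsewhere"
        }
    }
}

fn main() {
    assert_eq!(fetch_detail_lenient(false), Ok(Some(42)));
    assert_eq!(fetch_detail_lenient(true), Ok(None));
}
```
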
.ok_or_else(|| format_err!("can't get block info by hash {}", head_block_hash))?; - let chain_status = ChainStatus::new(head_block_header, head_block_info); - let chain_info = - ChainInfo::new(config.net().chain_id(), genesis_hash, chain_status.clone()); + let dag_tips = storage.get_tips_by_block_id(head_block_hash)?; + let chain_status = + ChainStatus::new(head_block_header.clone(), head_block_info); + let (dag_accumulator_info, k_total_difficulties) = storage.get_lastest_snapshot()?.map(|snapshot| { + (Some(snapshot.accumulator_info), Some(snapshot.k_total_difficulties)) + }).unwrap_or((None, None)); + let chain_state_info = ChainInfo::new( + config.net().chain_id(), + genesis_hash, + chain_status.clone(), + dag_accumulator_info.clone(), + k_total_difficulties, + ); let actor_service = - NetworkActorService::new(config, chain_info, rpc, peer_message_handle.clone())?; + NetworkActorService::new(config, chain_state_info, rpc, peer_message_handle.clone())?; let network_service = actor_service.network_service(); let network_async_service = NetworkServiceRef::new(network_service, ctx.self_ref()); // set self sync status to synced for test. - let mut sync_status = SyncStatus::new(chain_status); + let mut sync_status = SyncStatus::new(chain_status, dag_accumulator_info); sync_status.sync_done(); ctx.notify(SyncStatusChangeEvent(sync_status)); diff --git a/test-helper/src/txn.rs b/test-helper/src/txn.rs index 10a419487a..cd0668a0fc 100644 --- a/test-helper/src/txn.rs +++ b/test-helper/src/txn.rs @@ -111,7 +111,7 @@ pub fn create_account_txn_sent_as_association( seq_num: u64, initial_amount: u128, expiration_timstamp_secs: u64, - net: &ChainNetwork, + net: &starcoin_config::ChainNetwork, ) -> SignedUserTransaction { let args = vec![ bcs_ext::to_bytes(new_account.address()).unwrap(), diff --git a/txpool/src/test.rs b/txpool/src/test.rs index e205b388e6..a25ae23841 100644 --- a/txpool/src/test.rs +++ b/txpool/src/test.rs @@ -227,6 +227,7 @@ async fn test_rollback() -> Result<()> { U256::from(1024u64), config.net().genesis_config().consensus(), None, + None, )?; let excluded_txns = open_block.push_txns(vec![txn])?; assert_eq!(excluded_txns.discarded_txns.len(), 0); @@ -257,7 +258,9 @@ async fn test_rollback() -> Result<()> { .unwrap(); txns.insert( 0, - Transaction::BlockMetadata(enacted_block.to_metadata(parent_block_header.gas_used())), + Transaction::BlockMetadata( + enacted_block.to_metadata(parent_block_header.gas_used(), None), + ), ); let root = starcoin_executor::block_execute(&chain_state, txns, u64::MAX, None)?.state_root; diff --git a/types/src/block.rs b/types/src/block.rs index 45704fa069..1b091ab305 100644 --- a/types/src/block.rs +++ b/types/src/block.rs @@ -7,7 +7,7 @@ use crate::genesis_config::{ChainId, ConsensusStrategy}; use crate::language_storage::CORE_CODE_ADDRESS; use crate::transaction::SignedUserTransaction; use crate::U256; -use bcs_ext::Sample; +use bcs_ext::{BCSCodec, Sample}; use schemars::{self, JsonSchema}; use serde::de::Error; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -20,9 +20,13 @@ use starcoin_crypto::{ use starcoin_vm_types::account_config::genesis_address; use starcoin_vm_types::transaction::authenticator::AuthenticationKey; use std::fmt::Formatter; +use std::hash::Hash; + /// Type for block number. 
pub type BlockNumber = u64; +pub type ParentsHash = Option>; + /// Type for block header extra #[derive(Clone, Default, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, JsonSchema)] pub struct BlockHeaderExtra(#[schemars(with = "String")] [u8; 4]); @@ -152,6 +156,9 @@ pub struct BlockHeader { nonce: u32, /// block header extra extra: BlockHeaderExtra, + /// Parents hash. + #[serde(skip_serializing_if = "Option::is_none")] + parents_hash: ParentsHash, } impl BlockHeader { @@ -169,6 +176,7 @@ impl BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, ) -> BlockHeader { Self::new_with_auth_key( parent_hash, @@ -185,6 +193,7 @@ impl BlockHeader { chain_id, nonce, extra, + parents_hash, ) } @@ -204,6 +213,7 @@ impl BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, ) -> BlockHeader { let mut header = BlockHeader { id: None, @@ -221,6 +231,7 @@ impl BlockHeader { body_hash, chain_id, extra, + parents_hash, }; header.id = Some(header.crypto_hash()); header @@ -247,6 +258,9 @@ impl BlockHeader { self.parent_hash } + pub fn parents_hash(&self) -> ParentsHash { + self.parents_hash.clone() + } pub fn timestamp(&self) -> u64 { self.timestamp } @@ -326,6 +340,7 @@ impl BlockHeader { chain_id, 0, BlockHeaderExtra::default(), + None, ) } @@ -344,6 +359,7 @@ impl BlockHeader { ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + None, ) } @@ -374,6 +390,7 @@ impl<'de> Deserialize<'de> for BlockHeader { chain_id: ChainId, nonce: u32, extra: BlockHeaderExtra, + parents_hash: ParentsHash, } let header_data = BlockHeaderData::deserialize(deserializer)?; @@ -392,6 +409,7 @@ impl<'de> Deserialize<'de> for BlockHeader { header_data.chain_id, header_data.nonce, header_data.extra, + header_data.parents_hash, ); Ok(block_header) } @@ -413,6 +431,7 @@ impl Default for BlockHeader { ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + None, ) } } @@ -433,6 +452,7 @@ impl Sample for BlockHeader { ChainId::test(), 0, BlockHeaderExtra([0u8; 4]), + None, ) } } @@ -453,6 +473,7 @@ impl Into for BlockHeader { difficulty: self.difficulty, body_hash: self.body_hash, chain_id: self.chain_id, + parents_hash: self.parents_hash, } } } @@ -484,6 +505,8 @@ pub struct RawBlockHeader { pub body_hash: HashValue, /// The chain id pub chain_id: ChainId, + /// parents hash + pub parents_hash: ParentsHash, } #[derive(Default)] @@ -724,7 +747,6 @@ impl Block { .as_ref() .map(|uncles| uncles.len() as u64) .unwrap_or(0); - BlockMetadata::new( self.header.parent_hash(), self.header.timestamp, @@ -822,6 +844,8 @@ impl BlockInfo { pub fn block_id(&self) -> &HashValue { &self.block_id } + + } impl Sample for BlockInfo { @@ -863,6 +887,8 @@ pub struct BlockTemplate { pub difficulty: U256, /// Block consensus strategy pub strategy: ConsensusStrategy, + /// parents + pub parents_hash: ParentsHash, } impl BlockTemplate { @@ -876,6 +902,7 @@ impl BlockTemplate { difficulty: U256, strategy: ConsensusStrategy, block_metadata: BlockMetadata, + parents_hash: ParentsHash, ) -> Self { let (parent_hash, timestamp, author, _author_auth_key, _, number, _, _) = block_metadata.into_inner(); @@ -893,6 +920,7 @@ impl BlockTemplate { chain_id, difficulty, strategy, + parents_hash, } } @@ -911,6 +939,7 @@ impl BlockTemplate { self.chain_id, nonce, extra, + self.parents_hash, ); Block { header, @@ -918,6 +947,56 @@ impl BlockTemplate { } } + pub fn into_single_chain_block(self, nonce: u32, extra: BlockHeaderExtra) -> Block { + let header = BlockHeader::new( + self.parent_hash, 
+ self.timestamp, + self.number, + self.author, + self.txn_accumulator_root, + self.block_accumulator_root, + self.state_root, + self.gas_used, + self.difficulty, + self.body_hash, + self.chain_id, + nonce, + extra, + None, + ); + Block { + header, + body: self.body, + } + } + + fn generate_parent_header(&self) -> HashValue { + if self.parents_hash.is_none() { + return self.parent_hash; + } + let mut tips = self.parents_hash.as_ref().unwrap().clone(); + tips.sort(); + HashValue::sha3_256_of(&tips.encode().expect("dag parent must encode successfully")) + } + + pub fn as_raw_block_header_single_chain(&self) -> RawBlockHeader { + RawBlockHeader { + parent_hash: self.parent_hash, + timestamp: self.timestamp, + number: self.number, + author: self.author, + author_auth_key: None, + accumulator_root: self.txn_accumulator_root, + parent_block_accumulator_root: self.block_accumulator_root, + state_root: self.state_root, + gas_used: self.gas_used, + body_hash: self.body_hash, + difficulty: self.difficulty, + chain_id: self.chain_id, + parents_hash: self.parents_hash.clone(), + } + } + pub fn as_raw_block_header(&self) -> RawBlockHeader { RawBlockHeader { parent_hash: self.parent_hash, @@ -932,9 +1011,24 @@ impl BlockTemplate { body_hash: self.body_hash, difficulty: self.difficulty, chain_id: self.chain_id, + parents_hash: self.parents_hash.clone(), } } + pub fn as_pow_header_blob_single_chain(&self) -> Vec { + let mut blob = Vec::new(); + let raw_header = self.as_raw_block_header_single_chain(); + let raw_header_hash = raw_header.crypto_hash(); + let mut dh = [0u8; 32]; + raw_header.difficulty.to_big_endian(&mut dh); + let extend_and_nonce = [0u8; 12]; + blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); + blob.extend_from_slice(&extend_and_nonce); + blob.extend_from_slice(&dh); + + blob + } + pub fn as_pow_header_blob(&self) -> Vec { let mut blob = Vec::new(); let raw_header = self.as_raw_block_header(); @@ -942,10 +1036,10 @@ impl BlockTemplate { let mut dh = [0u8; 32]; raw_header.difficulty.to_big_endian(&mut dh); let extend_and_nonce = [0u8; 12]; - blob.extend_from_slice(raw_header_hash.to_vec().as_slice()); blob.extend_from_slice(&extend_and_nonce); blob.extend_from_slice(&dh); + blob } @@ -964,6 +1058,7 @@ impl BlockTemplate { self.chain_id, nonce, extra, + self.parents_hash, ) } } diff --git a/types/src/blockhash.rs b/types/src/blockhash.rs new file mode 100644 index 0000000000..f283d0f387 --- /dev/null +++ b/types/src/blockhash.rs @@ -0,0 +1,71 @@ +use starcoin_crypto::hash::HashValue; +use std::collections::{HashMap, HashSet}; + +pub const BLOCK_VERSION: u16 = 1; + +pub const HASH_LENGTH: usize = HashValue::LENGTH; + +use std::sync::Arc; + +pub type BlockHashes = Arc>; + +/// `blockhash::NONE` is a hash which is used in rare cases as the `None` block hash +pub const NONE: [u8; HASH_LENGTH] = [0u8; HASH_LENGTH]; + +/// `blockhash::VIRTUAL` is a special hash representing the `virtual` block. +pub const VIRTUAL: [u8; HASH_LENGTH] = [0xff; HASH_LENGTH]; + +/// `blockhash::ORIGIN` is a special hash representing a `virtual genesis` block. +/// It serves as a special local block which all locally-known +/// blocks are in its future. 
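
`generate_parent_header` sorts the tips before hashing their encoding, so the derived parent digest is independent of the order in which tips arrived. The invariant demonstrated with std's `DefaultHasher` standing in for `sha3_256_of` over the BCS encoding:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Canonicalize (sort) first, then hash: permutations of the same tip set
// always produce the same digest.
fn parent_digest(mut tips: Vec<u64>) -> u64 {
    tips.sort();
    let mut hasher = DefaultHasher::new();
    tips.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    assert_eq!(parent_digest(vec![3, 1, 2]), parent_digest(vec![2, 3, 1]));
    assert_ne!(parent_digest(vec![1, 2]), parent_digest(vec![1, 2, 3]));
}
```
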
+pub const ORIGIN: [u8; HASH_LENGTH] = [0xfe; HASH_LENGTH]; + +pub trait BlockHashExtensions { + fn is_none(&self) -> bool; + fn is_virtual(&self) -> bool; + fn is_origin(&self) -> bool; +} + +impl BlockHashExtensions for HashValue { + fn is_none(&self) -> bool { + self.eq(&HashValue::new(NONE)) + } + + fn is_virtual(&self) -> bool { + self.eq(&HashValue::new(VIRTUAL)) + } + + fn is_origin(&self) -> bool { + self.eq(&HashValue::new(ORIGIN)) + } +} + +/// Generates a unique block hash for each call to this function. +/// To be used for test purposes only. +pub fn new_unique() -> HashValue { + use std::sync::atomic::{AtomicU64, Ordering}; + static COUNTER: AtomicU64 = AtomicU64::new(1); + let c = COUNTER.fetch_add(1, Ordering::Relaxed); + HashValue::from_u64(c) +} + +/// TODO:FIXME as u256 +pub type BlueWorkType = u128; + +/// The type used to represent the GHOSTDAG K parameter +pub type KType = u16; + +/// Map from Block hash to K type +pub type HashKTypeMap = std::sync::Arc>; + +pub type BlockHashMap = HashMap; + +/// Same as `BlockHashMap` but a `HashSet`. +pub type BlockHashSet = HashSet; + +pub struct ChainPath { + pub added: Vec, + pub removed: Vec, +} + +pub type BlockLevel = u8; diff --git a/types/src/dag_block.rs b/types/src/dag_block.rs new file mode 100644 index 0000000000..672b728850 --- /dev/null +++ b/types/src/dag_block.rs @@ -0,0 +1,964 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::account_address::AccountAddress; +use crate::block::BlockHeaderExtra; +use crate::blockhash::ORIGIN; +use crate::genesis_config::{ChainId, ConsensusStrategy}; +use crate::language_storage::CORE_CODE_ADDRESS; +use crate::transaction::SignedUserTransaction; +use crate::U256; +use bcs_ext::Sample; +use schemars::{self, JsonSchema}; +use serde::{Deserialize, Deserializer, Serialize}; +pub use starcoin_accumulator::accumulator_info::AccumulatorInfo; +use starcoin_crypto::hash::{ACCUMULATOR_PLACEHOLDER_HASH, SPARSE_MERKLE_PLACEHOLDER_HASH}; +use starcoin_crypto::{ + hash::{CryptoHash, CryptoHasher, PlainCryptoHash}, + HashValue, +}; +use starcoin_vm_types::account_config::genesis_address; +use starcoin_vm_types::dag_block_metadata::DagBlockMetadata; +use starcoin_vm_types::transaction::authenticator::AuthenticationKey; +use std::fmt::Formatter; + +/// block timestamp allowed future times +pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 second; + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)] +pub struct DagBlockHeader { + #[serde(skip)] + id: Option, + /// Parent hash. + parent_hash: Vec, + /// Block timestamp. + timestamp: u64, + /// Block author. + author: AccountAddress, + /// Block author auth key. + /// this field is deprecated + author_auth_key: Option, + /// The transaction accumulator root hash after executing this block. + txn_accumulator_root: HashValue, + /// The parent block info's block accumulator root hash. + block_accumulator_root: HashValue, + /// The last transaction state_root of this block after execute. + state_root: HashValue, + /// Gas used for contracts execution. + gas_used: u64, + /// Block difficulty + #[schemars(with = "String")] + difficulty: U256, + /// hash for block body + body_hash: HashValue, + /// The chain id + chain_id: ChainId, + /// Consensus nonce field. 
diff --git a/types/src/dag_block.rs b/types/src/dag_block.rs
new file mode 100644
index 0000000000..672b728850
--- /dev/null
+++ b/types/src/dag_block.rs
@@ -0,0 +1,964 @@
+// Copyright (c) The Starcoin Core Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::account_address::AccountAddress;
+use crate::block::BlockHeaderExtra;
+use crate::blockhash::ORIGIN;
+use crate::genesis_config::{ChainId, ConsensusStrategy};
+use crate::language_storage::CORE_CODE_ADDRESS;
+use crate::transaction::SignedUserTransaction;
+use crate::U256;
+use bcs_ext::Sample;
+use schemars::{self, JsonSchema};
+use serde::{Deserialize, Deserializer, Serialize};
+pub use starcoin_accumulator::accumulator_info::AccumulatorInfo;
+use starcoin_crypto::hash::{ACCUMULATOR_PLACEHOLDER_HASH, SPARSE_MERKLE_PLACEHOLDER_HASH};
+use starcoin_crypto::{
+    hash::{CryptoHash, CryptoHasher, PlainCryptoHash},
+    HashValue,
+};
+use starcoin_vm_types::account_config::genesis_address;
+use starcoin_vm_types::dag_block_metadata::DagBlockMetadata;
+use starcoin_vm_types::transaction::authenticator::AuthenticationKey;
+use std::fmt::Formatter;
+
+/// block timestamp allowed future times
+pub const ALLOWED_FUTURE_BLOCKTIME: u64 = 30000; // 30 seconds
+
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, CryptoHasher, CryptoHash, JsonSchema)]
+pub struct DagBlockHeader {
+    #[serde(skip)]
+    id: Option<HashValue>,
+    /// Parent hash.
+    parent_hash: Vec<HashValue>,
+    /// Block timestamp.
+    timestamp: u64,
+    /// Block author.
+    author: AccountAddress,
+    /// Block author auth key.
+    /// this field is deprecated
+    author_auth_key: Option<AuthenticationKey>,
+    /// The transaction accumulator root hash after executing this block.
+    txn_accumulator_root: HashValue,
+    /// The parent block info's block accumulator root hash.
+    block_accumulator_root: HashValue,
+    /// The last transaction state_root of this block after execute.
+    state_root: HashValue,
+    /// Gas used for contracts execution.
+    gas_used: u64,
+    /// Block difficulty
+    #[schemars(with = "String")]
+    difficulty: U256,
+    /// hash for block body
+    body_hash: HashValue,
+    /// The chain id
+    chain_id: ChainId,
+    /// Consensus nonce field.
+    nonce: u32,
+    /// block header extra
+    extra: BlockHeaderExtra,
+}
+
+impl DagBlockHeader {
+    pub fn new(
+        parent_hash: Vec<HashValue>,
+        timestamp: u64,
+        author: AccountAddress,
+        txn_accumulator_root: HashValue,
+        block_accumulator_root: HashValue,
+        state_root: HashValue,
+        gas_used: u64,
+        difficulty: U256,
+        body_hash: HashValue,
+        chain_id: ChainId,
+        nonce: u32,
+        extra: BlockHeaderExtra,
+    ) -> DagBlockHeader {
+        Self::new_with_auth_key(
+            parent_hash,
+            timestamp,
+            author,
+            None,
+            txn_accumulator_root,
+            block_accumulator_root,
+            state_root,
+            gas_used,
+            difficulty,
+            body_hash,
+            chain_id,
+            nonce,
+            extra,
+        )
+    }
+
+    // the author_auth_key field is deprecated, but keep this fn for compat with old block.
+    fn new_with_auth_key(
+        parent_hash: Vec<HashValue>,
+        timestamp: u64,
+        author: AccountAddress,
+        author_auth_key: Option<AuthenticationKey>,
+        txn_accumulator_root: HashValue,
+        block_accumulator_root: HashValue,
+        state_root: HashValue,
+        gas_used: u64,
+        difficulty: U256,
+        body_hash: HashValue,
+        chain_id: ChainId,
+        nonce: u32,
+        extra: BlockHeaderExtra,
+    ) -> DagBlockHeader {
+        let mut header = DagBlockHeader {
+            id: None,
+            parent_hash,
+            block_accumulator_root,
+            timestamp,
+            author,
+            author_auth_key,
+            txn_accumulator_root,
+            state_root,
+            gas_used,
+            difficulty,
+            nonce,
+            body_hash,
+            chain_id,
+            extra,
+        };
+        header.id = Some(header.crypto_hash());
+        header
+    }
+
+    pub fn as_pow_header_blob(&self) -> Vec<u8> {
+        let mut blob = Vec::new();
+        let raw_header: RawDagBlockHeader = self.to_owned().into();
+        let raw_header_hash = raw_header.crypto_hash();
+        let mut diff = [0u8; 32];
+        raw_header.difficulty.to_big_endian(&mut diff);
+        let extend_and_nonce = [0u8; 12];
+        blob.extend_from_slice(raw_header_hash.to_vec().as_slice());
+        blob.extend_from_slice(&extend_and_nonce);
+        blob.extend_from_slice(&diff);
+        blob
+    }
+
+    pub fn id(&self) -> HashValue {
+        self.id
+            .expect("DagBlockHeader id should be Some after init.")
+    }
+
+    pub fn parent_hash(&self) -> Vec<HashValue> {
+        self.parent_hash.clone()
+    }
+
+    pub fn timestamp(&self) -> u64 {
+        self.timestamp
+    }
+
+    pub fn author(&self) -> AccountAddress {
+        self.author
+    }
+
+    pub fn author_auth_key(&self) -> Option<AuthenticationKey> {
+        self.author_auth_key
+    }
+
+    pub fn txn_accumulator_root(&self) -> HashValue {
+        self.txn_accumulator_root
+    }
+
+    pub fn state_root(&self) -> HashValue {
+        self.state_root
+    }
+
+    pub fn gas_used(&self) -> u64 {
+        self.gas_used
+    }
+
+    pub fn nonce(&self) -> u32 {
+        self.nonce
+    }
+
+    pub fn difficulty(&self) -> U256 {
+        self.difficulty
+    }
+
+    pub fn block_accumulator_root(&self) -> HashValue {
+        self.block_accumulator_root
+    }
+
+    pub fn body_hash(&self) -> HashValue {
+        self.body_hash
+    }
+
+    pub fn chain_id(&self) -> ChainId {
+        self.chain_id
+    }
+
+    pub fn extra(&self) -> &BlockHeaderExtra {
+        &self.extra
+    }
+
+    pub fn is_genesis(&self) -> bool {
+        if self.parent_hash.len() == 1 {
+            return self.parent_hash[0] == HashValue::new(ORIGIN);
+        }
+        false
+    }
+
+    pub fn genesis_block_header(
+        parent_hash: Vec<HashValue>,
+        timestamp: u64,
+        txn_accumulator_root: HashValue,
+        state_root: HashValue,
+        difficulty: U256,
+        body_hash: HashValue,
+        chain_id: ChainId,
+    ) -> Self {
+        Self::new(
+            parent_hash,
+            timestamp,
+            CORE_CODE_ADDRESS,
+            txn_accumulator_root,
+            *ACCUMULATOR_PLACEHOLDER_HASH,
+            state_root,
+            0,
+            difficulty,
+            body_hash,
+            chain_id,
+            0,
+            BlockHeaderExtra::default(),
+        )
+    }
+
+    pub fn random() -> Self {
+        Self::new(
+            vec![HashValue::random()],
+            rand::random(),
+            AccountAddress::random(),
+            HashValue::random(),
+            HashValue::random(),
+            HashValue::random(),
+            rand::random(),
+            U256::max_value(),
+            HashValue::random(),
+            ChainId::test(),
+            0,
+            BlockHeaderExtra::new([0u8; 4]),
+        )
+    }
+
+    pub fn as_builder(&self) -> DagBlockHeaderBuilder {
+        DagBlockHeaderBuilder::new_with(self.clone())
+    }
+}
+
+impl<'de> Deserialize<'de> for DagBlockHeader {
+    fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        #[serde(rename = "DagBlockHeader")]
+        struct DagBlockHeaderData {
+            parent_hash: Vec<HashValue>,
+            timestamp: u64,
+            author: AccountAddress,
+            author_auth_key: Option<AuthenticationKey>,
+            txn_accumulator_root: HashValue,
+            block_accumulator_root: HashValue,
+            state_root: HashValue,
+            gas_used: u64,
+            difficulty: U256,
+            body_hash: HashValue,
+            chain_id: ChainId,
+            nonce: u32,
+            extra: BlockHeaderExtra,
+        }
+
+        let header_data = DagBlockHeaderData::deserialize(deserializer)?;
+        let block_header = Self::new_with_auth_key(
+            header_data.parent_hash,
+            header_data.timestamp,
+            header_data.author,
+            header_data.author_auth_key,
+            header_data.txn_accumulator_root,
+            header_data.block_accumulator_root,
+            header_data.state_root,
+            header_data.gas_used,
+            header_data.difficulty,
+            header_data.body_hash,
+            header_data.chain_id,
+            header_data.nonce,
+            header_data.extra,
+        );
+        Ok(block_header)
+    }
+}
+
+impl Default for DagBlockHeader {
+    fn default() -> Self {
+        Self::new(
+            vec![HashValue::zero()],
+            0,
+            AccountAddress::ZERO,
+            HashValue::zero(),
+            HashValue::zero(),
+            HashValue::zero(),
+            0,
+            0.into(),
+            HashValue::zero(),
+            ChainId::test(),
+            0,
+            BlockHeaderExtra::new([0u8; 4]),
+        )
+    }
+}
+
+impl Sample for DagBlockHeader {
+    fn sample() -> Self {
+        Self::new(
+            vec![HashValue::zero()],
+            1610110515000,
+            genesis_address(),
+            *ACCUMULATOR_PLACEHOLDER_HASH,
+            *ACCUMULATOR_PLACEHOLDER_HASH,
+            *SPARSE_MERKLE_PLACEHOLDER_HASH,
+            0,
+            U256::from(1),
+            BlockBody::sample().crypto_hash(),
+            ChainId::test(),
+            0,
+            BlockHeaderExtra::new([0u8; 4]),
+        )
+    }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<RawDagBlockHeader> for DagBlockHeader {
+    fn into(self) -> RawDagBlockHeader {
+        RawDagBlockHeader {
+            parent_hash: self.parent_hash,
+            timestamp: self.timestamp,
+            author: self.author,
+            author_auth_key: self.author_auth_key,
+            accumulator_root: self.txn_accumulator_root,
+            parent_block_accumulator_root: self.block_accumulator_root,
+            state_root: self.state_root,
+            gas_used: self.gas_used,
+            difficulty: self.difficulty,
+            body_hash: self.body_hash,
+            chain_id: self.chain_id,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
+pub struct RawDagBlockHeader {
+    /// Parent hash.
+    pub parent_hash: Vec<HashValue>,
+    /// Block timestamp.
+    pub timestamp: u64,
+    /// Block author.
+    pub author: AccountAddress,
+    /// Block author auth key.
+    /// this field is deprecated
+    pub author_auth_key: Option<AuthenticationKey>,
+    /// The transaction accumulator root hash after executing this block.
+    pub accumulator_root: HashValue,
+    /// The parent block accumulator root hash.
+    pub parent_block_accumulator_root: HashValue,
+    /// The last transaction state_root of this block after execute.
+    pub state_root: HashValue,
+    /// Gas used for contracts execution.
+    pub gas_used: u64,
+    /// Block difficulty
+    pub difficulty: U256,
+    /// hash for block body
+    pub body_hash: HashValue,
+    /// The chain id
+    pub chain_id: ChainId,
+}
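Because id carries #[serde(skip)] and both new_with_auth_key and the Deserialize impl above recompute it from the remaining fields, a serialization round-trip should reproduce the same derived id. A sketch of that property, assuming the BCSCodec encode/decode API used elsewhere in this change and anyhow for error plumbing:

    use bcs_ext::BCSCodec;
    use starcoin_types::dag_block::DagBlockHeader;

    fn roundtrip_preserves_id(header: &DagBlockHeader) -> anyhow::Result<()> {
        let bytes = header.encode()?; // id is skipped on the wire
        let decoded = DagBlockHeader::decode(&bytes)?; // Deserialize recomputes id
        assert_eq!(header.id(), decoded.id());
        Ok(())
    }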
+
+#[derive(Default)]
+pub struct DagBlockHeaderBuilder {
+    buffer: DagBlockHeader,
+}
+
+impl DagBlockHeaderBuilder {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn random() -> Self {
+        Self {
+            buffer: DagBlockHeader::random(),
+        }
+    }
+
+    fn new_with(buffer: DagBlockHeader) -> Self {
+        Self { buffer }
+    }
+
+    pub fn with_parent_hash(mut self, parent_hash: Vec<HashValue>) -> Self {
+        self.buffer.parent_hash = parent_hash;
+        self
+    }
+
+    pub fn with_timestamp(mut self, timestamp: u64) -> Self {
+        self.buffer.timestamp = timestamp;
+        self
+    }
+
+    pub fn with_author(mut self, author: AccountAddress) -> Self {
+        self.buffer.author = author;
+        self
+    }
+
+    pub fn with_author_auth_key(mut self, author_auth_key: Option<AuthenticationKey>) -> Self {
+        self.buffer.author_auth_key = author_auth_key;
+        self
+    }
+
+    pub fn with_accumulator_root(mut self, accumulator_root: HashValue) -> Self {
+        self.buffer.txn_accumulator_root = accumulator_root;
+        self
+    }
+
+    pub fn with_parent_block_accumulator_root(
+        mut self,
+        parent_block_accumulator_root: HashValue,
+    ) -> Self {
+        self.buffer.block_accumulator_root = parent_block_accumulator_root;
+        self
+    }
+
+    pub fn with_state_root(mut self, state_root: HashValue) -> Self {
+        self.buffer.state_root = state_root;
+        self
+    }
+
+    pub fn with_gas_used(mut self, gas_used: u64) -> Self {
+        self.buffer.gas_used = gas_used;
+        self
+    }
+
+    pub fn with_difficulty(mut self, difficulty: U256) -> Self {
+        self.buffer.difficulty = difficulty;
+        self
+    }
+
+    pub fn with_body_hash(mut self, body_hash: HashValue) -> Self {
+        self.buffer.body_hash = body_hash;
+        self
+    }
+
+    pub fn with_chain_id(mut self, chain_id: ChainId) -> Self {
+        self.buffer.chain_id = chain_id;
+        self
+    }
+
+    pub fn with_nonce(mut self, nonce: u32) -> Self {
+        self.buffer.nonce = nonce;
+        self
+    }
+
+    pub fn with_extra(mut self, extra: BlockHeaderExtra) -> Self {
+        self.buffer.extra = extra;
+        self
+    }
+
+    pub fn build(mut self) -> DagBlockHeader {
+        self.buffer.id = Some(self.buffer.crypto_hash());
+        self.buffer
+    }
+}
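A sketch of how the builder above might be used in tests; build() seals the header by writing its crypto hash into id (values here are arbitrary, and the starcoin_types::dag_block module path is assumed):

    use starcoin_types::dag_block::{DagBlockHeader, DagBlockHeaderBuilder};

    fn sample_header() -> DagBlockHeader {
        DagBlockHeaderBuilder::random()
            .with_gas_used(0)
            .with_nonce(42)
            .build() // id = crypto_hash of the assembled buffer
    }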
+
+#[derive(
+    Default, Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash,
+)]
+pub struct BlockBody {
+    /// The transactions in this block.
+    pub transactions: Vec<SignedUserTransaction>,
+    /// uncles block header
+    pub uncles: Option<Vec<DagBlockHeader>>,
+}
+
+impl BlockBody {
+    pub fn new(
+        transactions: Vec<SignedUserTransaction>,
+        uncles: Option<Vec<DagBlockHeader>>,
+    ) -> Self {
+        Self {
+            transactions,
+            uncles,
+        }
+    }
+
+    pub fn get_txn(&self, index: usize) -> Option<&SignedUserTransaction> {
+        self.transactions.get(index)
+    }
+
+    /// Just for test
+    pub fn new_empty() -> BlockBody {
+        BlockBody {
+            transactions: Vec::new(),
+            uncles: None,
+        }
+    }
+
+    pub fn hash(&self) -> HashValue {
+        self.crypto_hash()
+    }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<BlockBody> for Vec<SignedUserTransaction> {
+    fn into(self) -> BlockBody {
+        BlockBody {
+            transactions: self,
+            uncles: None,
+        }
+    }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<Vec<SignedUserTransaction>> for BlockBody {
+    fn into(self) -> Vec<SignedUserTransaction> {
+        self.transactions
+    }
+}
+
+impl Sample for BlockBody {
+    fn sample() -> Self {
+        Self {
+            transactions: vec![],
+            uncles: None,
+        }
+    }
+}
+
+/// A block, encoded as it is on the block chain.
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash)]
+pub struct Block {
+    /// The header of this block.
+    pub header: DagBlockHeader,
+    /// The body of this block.
+    pub body: BlockBody,
+}
+
+impl Block {
+    pub fn new<B>(header: DagBlockHeader, body: B) -> Self
+    where
+        B: Into<BlockBody>,
+    {
+        Block {
+            header,
+            body: body.into(),
+        }
+    }
+
+    pub fn id(&self) -> HashValue {
+        self.header.id()
+    }
+
+    pub fn header(&self) -> &DagBlockHeader {
+        &self.header
+    }
+
+    pub fn transactions(&self) -> &[SignedUserTransaction] {
+        self.body.transactions.as_slice()
+    }
+
+    pub fn uncles(&self) -> Option<&[DagBlockHeader]> {
+        match &self.body.uncles {
+            Some(uncles) => Some(uncles.as_slice()),
+            None => None,
+        }
+    }
+
+    pub fn uncle_ids(&self) -> Vec<HashValue> {
+        self.uncles()
+            .map(|uncles| uncles.iter().map(|header| header.id()).collect())
+            .unwrap_or_default()
+    }
+
+    pub fn into_inner(self) -> (DagBlockHeader, BlockBody) {
+        (self.header, self.body)
+    }
+
+    pub fn genesis_block(
+        parent_hash: Vec<HashValue>,
+        timestamp: u64,
+        accumulator_root: HashValue,
+        state_root: HashValue,
+        difficulty: U256,
+        genesis_txn: SignedUserTransaction,
+    ) -> Self {
+        let chain_id = genesis_txn.chain_id();
+        let block_body = BlockBody::new(vec![genesis_txn], None);
+        let header = DagBlockHeader::genesis_block_header(
+            parent_hash,
+            timestamp,
+            accumulator_root,
+            state_root,
+            difficulty,
+            block_body.hash(),
+            chain_id,
+        );
+        Self {
+            header,
+            body: block_body,
+        }
+    }
+
+    pub fn to_metadata(&self, parent_gas_used: u64) -> DagBlockMetadata {
+        DagBlockMetadata::new(
+            self.header.parent_hash(),
+            self.header.timestamp,
+            self.header.author,
+            self.header.author_auth_key,
+            self.header.chain_id,
+            parent_gas_used,
+        )
+    }
+}
+
+impl std::fmt::Display for Block {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Block{{id:\"{}\", parent_id:\"{:?}\",",
+            self.id(),
+            self.header().parent_hash()
+        )?;
+        if let Some(uncles) = &self.body.uncles {
+            write!(f, "uncles:[")?;
+            for uncle in uncles {
+                write!(f, "\"{}\",", uncle.id())?;
+            }
+            write!(f, "],")?;
+        }
+        write!(f, "transactions:[")?;
+        for txn in &self.body.transactions {
+            write!(f, "\"{}\",", txn.id())?;
+        }
+        write!(f, "]}}")
+    }
+}
+
+impl Sample for Block {
+    fn sample() -> Self {
+        Self {
+            header: DagBlockHeader::sample(),
+            body: BlockBody::sample(),
+        }
+    }
+}
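Block::new is generic over anything convertible into a BlockBody, so the Into impl above lets callers pass a bare transaction list and get uncles defaulted to None. A sketch (types assumed in scope from this module):

    use starcoin_types::dag_block::{Block, DagBlockHeader};
    use starcoin_types::transaction::SignedUserTransaction;

    fn block_from_txns(header: DagBlockHeader, txns: Vec<SignedUserTransaction>) -> Block {
        // Vec<SignedUserTransaction> converts into BlockBody { transactions, uncles: None }.
        Block::new(header, txns)
    }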
+
+/// `BlockInfo` is the object we store in the storage. It consists of the
+/// block as well as the execution result of this block.
+#[derive(
+    Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, CryptoHash, JsonSchema,
+)]
+pub struct BlockInfo {
+    /// Block id
+    pub block_id: HashValue,
+    /// The total difficulty.
+    #[schemars(with = "String")]
+    pub total_difficulty: U256,
+    /// The transaction accumulator info
+    pub txn_accumulator_info: AccumulatorInfo,
+    /// The block accumulator info.
+    pub block_accumulator_info: AccumulatorInfo,
+}
+
+impl BlockInfo {
+    pub fn new(
+        block_id: HashValue,
+        total_difficulty: U256,
+        txn_accumulator_info: AccumulatorInfo,
+        block_accumulator_info: AccumulatorInfo,
+    ) -> Self {
+        Self {
+            block_id,
+            total_difficulty,
+            txn_accumulator_info,
+            block_accumulator_info,
+        }
+    }
+
+    pub fn id(&self) -> HashValue {
+        self.crypto_hash()
+    }
+
+    pub fn get_total_difficulty(&self) -> U256 {
+        self.total_difficulty
+    }
+
+    pub fn get_block_accumulator_info(&self) -> &AccumulatorInfo {
+        &self.block_accumulator_info
+    }
+
+    pub fn get_txn_accumulator_info(&self) -> &AccumulatorInfo {
+        &self.txn_accumulator_info
+    }
+
+    pub fn block_id(&self) -> &HashValue {
+        &self.block_id
+    }
+}
+
+impl Sample for BlockInfo {
+    fn sample() -> Self {
+        Self {
+            block_id: DagBlockHeader::sample().id(),
+            total_difficulty: 0.into(),
+            txn_accumulator_info: AccumulatorInfo::sample(),
+            block_accumulator_info: AccumulatorInfo::sample(),
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct DagBlockTemplate {
+    /// Parent hash.
+    pub parent_hash: Vec<HashValue>,
+    /// Block timestamp.
+    pub timestamp: u64,
+    /// Block author.
+    pub author: AccountAddress,
+    /// The transaction accumulator root hash after executing this block.
+    pub txn_accumulator_root: HashValue,
+    /// The block accumulator root hash.
+    pub block_accumulator_root: HashValue,
+    /// The last transaction state_root of this block after execute.
+    pub state_root: HashValue,
+    /// Gas used for contracts execution.
+    pub gas_used: u64,
+    /// hash for block body
+    pub body_hash: HashValue,
+    /// body of the block
+    pub body: BlockBody,
+    /// The chain id
+    pub chain_id: ChainId,
+    /// Block difficulty
+    pub difficulty: U256,
+    /// Block consensus strategy
+    pub strategy: ConsensusStrategy,
+}
+
+impl DagBlockTemplate {
+    pub fn new(
+        parent_block_accumulator_root: HashValue,
+        accumulator_root: HashValue,
+        state_root: HashValue,
+        gas_used: u64,
+        body: BlockBody,
+        chain_id: ChainId,
+        difficulty: U256,
+        strategy: ConsensusStrategy,
+        block_metadata: DagBlockMetadata,
+    ) -> Self {
+        let (parent_hash, timestamp, author, _author_auth_key, _, _) = block_metadata.into_inner();
+        Self {
+            parent_hash,
+            block_accumulator_root: parent_block_accumulator_root,
+            timestamp,
+            author,
+            txn_accumulator_root: accumulator_root,
+            state_root,
+            gas_used,
+            body_hash: body.hash(),
+            body,
+            chain_id,
+            difficulty,
+            strategy,
+        }
+    }
+
+    pub fn into_block(self, nonce: u32, extra: BlockHeaderExtra) -> Block {
+        let header = DagBlockHeader::new(
+            self.parent_hash,
+            self.timestamp,
+            self.author,
+            self.txn_accumulator_root,
+            self.block_accumulator_root,
+            self.state_root,
+            self.gas_used,
+            self.difficulty,
+            self.body_hash,
+            self.chain_id,
+            nonce,
+            extra,
+        );
+        Block {
+            header,
+            body: self.body,
+        }
+    }
+
+    pub fn as_raw_block_header(&self) -> RawDagBlockHeader {
+        RawDagBlockHeader {
+            parent_hash: self.parent_hash.clone(),
+            timestamp: self.timestamp,
+            author: self.author,
+            author_auth_key: None,
+            accumulator_root: self.txn_accumulator_root,
+            parent_block_accumulator_root: self.block_accumulator_root,
+            state_root: self.state_root,
+            gas_used: self.gas_used,
+            body_hash: self.body_hash,
+            difficulty: self.difficulty,
+            chain_id: self.chain_id,
+        }
+    }
+
+    pub fn as_pow_header_blob(&self) -> Vec<u8> {
+        let mut blob = Vec::new();
+        let raw_header = self.as_raw_block_header();
+        let raw_header_hash = raw_header.crypto_hash();
+        let mut dh = [0u8; 32];
+        raw_header.difficulty.to_big_endian(&mut dh);
+        let extend_and_nonce = [0u8; 12];
+
+        blob.extend_from_slice(raw_header_hash.to_vec().as_slice());
+        blob.extend_from_slice(&extend_and_nonce);
+        blob.extend_from_slice(&dh);
+        blob
+    }
+
+    pub fn into_block_header(self, nonce: u32, extra: BlockHeaderExtra) -> DagBlockHeader {
+        DagBlockHeader::new(
+            self.parent_hash,
+            self.timestamp,
+            self.author,
+            self.txn_accumulator_root,
+            self.block_accumulator_root,
+            self.state_root,
+            self.gas_used,
+            self.difficulty,
+            self.body_hash,
+            self.chain_id,
+            nonce,
+            extra,
+        )
+    }
+}
+
+#[derive(Clone, Debug, Hash, Serialize, Deserialize, CryptoHasher, CryptoHash)]
+pub struct ExecutedBlock {
+    pub block: Block,
+    pub block_info: BlockInfo,
+}
+
+impl ExecutedBlock {
+    pub fn new(block: Block, block_info: BlockInfo) -> Self {
+        ExecutedBlock { block, block_info }
+    }
+
+    pub fn total_difficulty(&self) -> U256 {
+        self.block_info.total_difficulty
+    }
+
+    pub fn block(&self) -> &Block {
+        &self.block
+    }
+
+    pub fn block_info(&self) -> &BlockInfo {
+        &self.block_info
+    }
+
+    pub fn header(&self) -> &DagBlockHeader {
+        self.block.header()
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct BlockSummary {
+    pub block_header: DagBlockHeader,
+    pub uncles: Vec<DagBlockHeader>,
+}
+
+impl BlockSummary {
+    pub fn uncles(&self) -> &[DagBlockHeader] {
+        &self.uncles
+    }
+
+    pub fn header(&self) -> &DagBlockHeader {
+        &self.block_header
+    }
+}
+
+impl From<Block> for BlockSummary {
+    fn from(block: Block) -> Self {
+        Self {
+            block_header: block.header,
+            uncles: block.body.uncles.unwrap_or_default(),
+        }
+    }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<(DagBlockHeader, Vec<DagBlockHeader>)> for BlockSummary {
+    fn into(self) -> (DagBlockHeader, Vec<DagBlockHeader>) {
+        (self.block_header, self.uncles)
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct UncleSummary {
+    /// total uncle
+    pub uncles: u64,
+    /// sum(number of the block which contain uncle block - uncle parent block number).
+    pub sum: u64,
+    pub avg: u64,
+    pub time_sum: u64,
+    pub time_avg: u64,
+}
+
+impl UncleSummary {
+    pub fn new(uncles: u64, sum: u64, time_sum: u64) -> Self {
+        let (avg, time_avg) = (
+            sum.checked_div(uncles).unwrap_or_default(),
+            time_sum.checked_div(uncles).unwrap_or_default(),
+        );
+        Self {
+            uncles,
+            sum,
+            avg,
+            time_sum,
+            time_avg,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct EpochUncleSummary {
+    /// epoch number
+    pub epoch: u64,
+    pub number_summary: UncleSummary,
+    pub epoch_summary: UncleSummary,
+}
+
+impl EpochUncleSummary {
+    pub fn new(epoch: u64, number_summary: UncleSummary, epoch_summary: UncleSummary) -> Self {
+        Self {
+            epoch,
+            number_summary,
+            epoch_summary,
+        }
+    }
+}
+
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)]
+pub struct KTotalDifficulty {
+    pub head_block_id: HashValue,
+    pub total_difficulty: U256,
+}
+
+impl Ord for KTotalDifficulty {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.total_difficulty.cmp(&other.total_difficulty)
+    }
+}
+
+impl PartialOrd for KTotalDifficulty {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
diff --git a/types/src/header.rs b/types/src/header.rs
new file mode 100644
index 0000000000..a93ddcde36
--- /dev/null
+++ b/types/src/header.rs
@@ -0,0 +1,74 @@
+use crate::block::{BlockHeader, BlockNumber};
+use crate::blockhash::{BlockLevel, ORIGIN};
+use crate::U256;
+use serde::{Deserialize, Serialize};
+use starcoin_crypto::HashValue as Hash;
+use std::sync::Arc;
+
+pub trait ConsensusHeader {
+    fn parents_hash(&self) -> &[Hash];
+    fn difficulty(&self) -> U256;
+    fn hash(&self) -> Hash;
+    fn timestamp(&self) -> u64;
+}
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+pub struct DagHeader {
+    block_header: BlockHeader,
+    parents_hash: Vec<Hash>,
+}
+
+impl DagHeader {
+    pub fn new(block_header: BlockHeader) -> Self {
+        Self {
+            parents_hash: block_header
+                .parents_hash()
+                .expect("dag block must have parents hash"),
+            block_header,
+        }
+    }
+
+    pub fn new_genesis(genesis_header: BlockHeader) -> DagHeader {
+        Self {
+            block_header: genesis_header,
+            parents_hash: vec![Hash::new(ORIGIN)],
+        }
+    }
+
+    pub fn number(&self) -> BlockNumber {
+        self.block_header.number()
+    }
+}
+
+impl Into<BlockHeader> for DagHeader {
+    fn into(self) -> BlockHeader {
+        self.block_header
+    }
+}
+
+impl ConsensusHeader for DagHeader {
+    fn parents_hash(&self) -> &[Hash] {
+        &self.parents_hash
+    }
+
+    fn difficulty(&self) -> U256 {
+        self.block_header.difficulty()
+    }
+
+    fn hash(&self) -> Hash {
+        self.block_header.id()
+    }
+
+    fn timestamp(&self) -> u64 {
+        self.block_header.timestamp()
+    }
+}
+
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+pub struct HeaderWithBlockLevel {
+    pub header: Arc<DagHeader>,
+    pub block_level: BlockLevel,
+}
+
+#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
+pub struct CompactHeaderData {
+    pub timestamp: u64,
+    pub difficulty: U256,
+}
diff --git a/types/src/lib.rs b/types/src/lib.rs
index ec49aa8bed..9ff354a624 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -24,6 +24,7 @@ pub mod account_state;
 #[allow(clippy::too_many_arguments)]
 pub mod block;
 pub mod compact_block;
+pub mod dag_block;
 
 pub mod block_metadata {
     pub use starcoin_vm_types::block_metadata::BlockMetadata;
@@ -104,3 +105,6 @@ pub mod sync_status;
 pub mod proof {
     pub use forkable_jellyfish_merkle::proof::SparseMerkleProof;
 }
+
+pub mod blockhash;
+pub mod header;
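DagHeader above wraps a plain BlockHeader and exposes the DAG-specific view through ConsensusHeader; a genesis wrapper gets the ORIGIN sentinel as its single parent. A sketch of consuming the trait generically (assumes the new starcoin_types::header module is in scope):

    use starcoin_crypto::HashValue;
    use starcoin_types::header::ConsensusHeader;

    /// Summarize any consensus header: parent count, timestamp, and id hash.
    fn describe<H: ConsensusHeader>(header: &H) -> (usize, u64, HashValue) {
        (header.parents_hash().len(), header.timestamp(), header.hash())
    }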
diff --git a/types/src/startup_info.rs b/types/src/startup_info.rs
index d536020128..4e08226411 100644
--- a/types/src/startup_info.rs
+++ b/types/src/startup_info.rs
@@ -2,31 +2,46 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use crate::block::{BlockHeader, BlockInfo, BlockNumber};
+use crate::dag_block::KTotalDifficulty;
 use anyhow::Result;
 use bcs_ext::{BCSCodec, Sample};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use starcoin_accumulator::accumulator_info::AccumulatorInfo;
+use starcoin_accumulator::MerkleAccumulator;
 use starcoin_crypto::HashValue;
 use starcoin_uint::U256;
 use starcoin_vm_types::genesis_config::ChainId;
+use std::collections::BTreeSet;
 use std::convert::{TryFrom, TryInto};
 use std::fmt;
 use std::fmt::Formatter;
+use std::hash::Hash;
+
 /// The info of a chain.
 #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)]
 pub struct ChainInfo {
     chain_id: ChainId,
     genesis_hash: HashValue,
     status: ChainStatus,
+    flexi_dag_accumulator_info: Option<AccumulatorInfo>,
+    k_total_difficulties: Option<BTreeSet<KTotalDifficulty>>,
 }
 
 impl ChainInfo {
-    pub fn new(chain_id: ChainId, genesis_hash: HashValue, status: ChainStatus) -> Self {
+    pub fn new(
+        chain_id: ChainId,
+        genesis_hash: HashValue,
+        status: ChainStatus,
+        flexi_dag_accumulator_info: Option<AccumulatorInfo>,
+        k_total_difficulties: Option<BTreeSet<KTotalDifficulty>>,
+    ) -> Self {
         Self {
             chain_id,
             genesis_hash,
             status,
+            flexi_dag_accumulator_info,
+            k_total_difficulties,
         }
     }
@@ -43,15 +58,30 @@ impl ChainInfo {
     }
 
     pub fn update_status(&mut self, status: ChainStatus) {
-        self.status = status
+        self.status = status;
+    }
+
+    pub fn update_dag_accumulator_info(
+        &mut self,
+        flexi_dag_accumulator_info: Option<AccumulatorInfo>,
+    ) {
+        self.flexi_dag_accumulator_info = flexi_dag_accumulator_info;
     }
 
     pub fn head(&self) -> &BlockHeader {
-        self.status.head()
+        &self.status.head
+    }
+
+    pub fn dag_accumulator_info(&self) -> &Option<AccumulatorInfo> {
+        &self.flexi_dag_accumulator_info
     }
 
     pub fn total_difficulty(&self) -> U256 {
-        self.status.total_difficulty()
+        self.status.info.get_total_difficulty()
+    }
+
+    pub fn k_total_difficulties(&self) -> &Option<BTreeSet<KTotalDifficulty>> {
+        &self.k_total_difficulties
     }
 
     pub fn into_inner(self) -> (ChainId, HashValue, ChainStatus) {
@@ -63,6 +93,13 @@ impl ChainInfo {
             chain_id: ChainId::new(rand::random()),
             genesis_hash: HashValue::random(),
             status: ChainStatus::random(),
+            flexi_dag_accumulator_info: Some(AccumulatorInfo::new(
+                HashValue::random(),
+                vec![],
+                rand::random::<u64>(),
+                rand::random::<u64>(),
+            )),
+            k_total_difficulties: Some(BTreeSet::new()),
         }
     }
 }
@@ -73,6 +110,8 @@ impl std::default::Default for ChainInfo {
             chain_id: ChainId::test(),
             genesis_hash: HashValue::default(),
             status: ChainStatus::sample(),
+            flexi_dag_accumulator_info: Some(AccumulatorInfo::default()),
+            k_total_difficulties: Some(BTreeSet::new()),
         }
     }
 }
@@ -120,7 +159,7 @@ impl ChainStatus {
             ),
         );
         Self {
-            head,
+            head: head.clone(),
             info: block_info,
         }
     }
@@ -151,10 +190,44 @@ impl Sample for ChainStatus {
     }
 }
 
+#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)]
+pub struct DagChainStatus {
+    pub flexi_dag_accumulator_info: AccumulatorInfo,
+}
+
+impl DagChainStatus {
+    pub fn new(flexi_dag_accumulator_info: AccumulatorInfo) -> Self {
+        Self {
+            flexi_dag_accumulator_info,
+        }
+    }
+
+    pub fn random() -> Self {
+        let head = BlockHeader::random();
+        Self {
+            flexi_dag_accumulator_info: AccumulatorInfo::new(
+                head.block_accumulator_root(),
+                vec![],
+                rand::random::<u64>(),
+                rand::random::<u64>(),
+            ),
+        }
+    }
+
+    pub fn sample() -> Self {
+        Self {
+            flexi_dag_accumulator_info: AccumulatorInfo::sample(),
+        }
+    }
+}
+
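Call sites that predate the flexidag rollout can pass None for both new ChainInfo fields, which is exactly what the fork_chain.rs hunk further below does. A sketch (argument values are placeholders):

    use starcoin_crypto::HashValue;
    use starcoin_types::startup_info::{ChainInfo, ChainStatus};
    use starcoin_vm_types::genesis_config::ChainId;

    fn legacy_chain_info(status: ChainStatus) -> ChainInfo {
        // No dag accumulator info and no k-total-difficulty set yet.
        ChainInfo::new(ChainId::test(), HashValue::random(), status, None, None)
    }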
 #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug)]
 pub struct StartupInfo {
     /// main chain head block hash
     pub main: HashValue,
+
+    /// dag accumulator info hash
+    pub dag_main: Option<HashValue>,
 }
 
 impl fmt::Display for StartupInfo {
@@ -168,7 +241,14 @@ impl fmt::Display for StartupInfo {
 
 impl StartupInfo {
     pub fn new(main: HashValue) -> Self {
-        Self { main }
+        Self {
+            main,
+            dag_main: None,
+        }
+    }
+
+    pub fn new_with_dag(main: HashValue, dag_main: Option<HashValue>) -> Self {
+        Self { main, dag_main }
     }
 
     pub fn update_main(&mut self, new_head: HashValue) {
@@ -178,6 +258,14 @@ impl StartupInfo {
     pub fn get_main(&self) -> &HashValue {
         &self.main
     }
+
+    pub fn update_dag_main(&mut self, new_head: HashValue) {
+        self.dag_main = Some(new_head);
+    }
+
+    pub fn get_dag_main(&self) -> Option<HashValue> {
+        self.dag_main
+    }
 }
 
 impl TryFrom<Vec<u8>> for StartupInfo {
diff --git a/types/src/sync_status.rs b/types/src/sync_status.rs
index 5ad32c6169..1fb3b97e9e 100644
--- a/types/src/sync_status.rs
+++ b/types/src/sync_status.rs
@@ -5,6 +5,7 @@ use crate::block::BlockIdAndNumber;
 use crate::startup_info::ChainStatus;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
+use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator};
 use starcoin_uint::U256;
 
 #[derive(Eq, PartialEq, Deserialize, Serialize, Clone, Debug, JsonSchema)]
 pub enum SyncState {
@@ -38,15 +39,28 @@ impl SyncState {
 pub struct SyncStatus {
     chain_status: ChainStatus,
     state: SyncState,
+    dag_accumulator_info: Option<AccumulatorInfo>,
 }
 
 pub const NEARLY_SYNCED_BLOCKS: u64 = 24;
 
 impl SyncStatus {
-    pub fn new(chain_status: ChainStatus) -> Self {
+    pub fn new(chain_status: ChainStatus, dag_accumulator_info: Option<AccumulatorInfo>) -> Self {
         Self {
             chain_status,
             state: SyncState::Prepare,
+            dag_accumulator_info,
+        }
+    }
+
+    pub fn new_with_dag_accumulator(
+        chain_status: ChainStatus,
+        dag_accumulator_info: AccumulatorInfo,
+    ) -> Self {
+        Self {
+            chain_status,
+            state: SyncState::Prepare,
+            dag_accumulator_info: Some(dag_accumulator_info),
         }
     }
@@ -69,10 +83,18 @@ impl SyncStatus {
         false
     }
 
+    pub fn update_dag_accumulator_info(&mut self, dag_accumulator_info: Option<AccumulatorInfo>) {
+        self.dag_accumulator_info = dag_accumulator_info;
+    }
+
     pub fn sync_status(&self) -> &SyncState {
         &self.state
     }
 
+    pub fn dag_accumulator_info(&self) -> &Option<AccumulatorInfo> {
+        &self.dag_accumulator_info
+    }
+
     pub fn chain_status(&self) -> &ChainStatus {
         &self.chain_status
     }
@@ -89,13 +111,11 @@ impl SyncStatus {
                 target,
                 total_difficulty,
             } => {
-                if target.number() < self.chain_status.head().number() {
+                let max_header_number = self.chain_status.head().number();
+                if target.number() < max_header_number {
                     false
                 } else {
-                    target
-                        .number
-                        .saturating_sub(self.chain_status.head().number())
-                        <= NEARLY_SYNCED_BLOCKS
+                    target.number.saturating_sub(max_header_number) <= NEARLY_SYNCED_BLOCKS
                         || self.chain_status.total_difficulty() >= *total_difficulty
                 }
             }
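The refactored check above keeps the same rule: a node counts as nearly synced once its head is within NEARLY_SYNCED_BLOCKS (24) of the sync target, or its total difficulty has caught up. A standalone sketch of just the block-distance half (hypothetical free function mirroring the logic above):

    const NEARLY_SYNCED_BLOCKS: u64 = 24;

    /// Mirror of the distance comparison above: a head already past the
    /// target is handled by the preceding `false` branch, so only the
    /// remaining gap matters here.
    fn nearly_synced_by_distance(target_number: u64, head_number: u64) -> bool {
        target_number >= head_number
            && target_number.saturating_sub(head_number) <= NEARLY_SYNCED_BLOCKS
    }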
diff --git a/types/src/system_events.rs b/types/src/system_events.rs
index 0a84fe1a2d..0fdc0c8899 100644
--- a/types/src/system_events.rs
+++ b/types/src/system_events.rs
@@ -17,7 +17,7 @@ pub struct NewHeadBlock(pub Arc<ExecutedBlock>);
 pub struct NewBranch(pub Arc<ExecutedBlock>);
 
 #[derive(Clone, Debug)]
-pub struct MinedBlock(pub Arc<Block>);
+pub struct MinedBlock(pub Arc<Block>, pub Option<Vec<HashValue>>);
 
 ///Fire this event on System start and all service is init.
 #[derive(Clone, Debug)]
diff --git a/vm/starcoin-transactional-test-harness/src/fork_chain.rs b/vm/starcoin-transactional-test-harness/src/fork_chain.rs
index 9d0dda112d..b5cf984a64 100644
--- a/vm/starcoin-transactional-test-harness/src/fork_chain.rs
+++ b/vm/starcoin-transactional-test-harness/src/fork_chain.rs
@@ -198,6 +198,8 @@ impl ChainApi for MockChainApi {
                 status.head.header().chain_id(),
                 HashValue::random(),
                 status.status,
+                None,
+                None,
             ))),
             None => match client {
                 Some(client) => client.info().await.map_err(|e| anyhow!("{}", e)),
diff --git a/vm/types/src/dag_block_metadata.rs b/vm/types/src/dag_block_metadata.rs
new file mode 100644
index 0000000000..db785968c0
--- /dev/null
+++ b/vm/types/src/dag_block_metadata.rs
@@ -0,0 +1,146 @@
+// Copyright (c) The Starcoin Core Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+// Copyright (c) The Diem Core Contributors
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::account_address::AccountAddress;
+use crate::account_config::genesis_address;
+use crate::genesis_config::ChainId;
+use crate::transaction::authenticator::AuthenticationKey;
+use bcs_ext::Sample;
+use serde::{Deserialize, Deserializer, Serialize};
+use starcoin_crypto::hash::PlainCryptoHash;
+use starcoin_crypto::{
+    hash::{CryptoHash, CryptoHasher},
+    HashValue,
+};
+
+/// Struct that will be persisted on chain to store the information of the current block.
+///
+/// The flow will look like the following:
+/// 1. The executor will pass this struct to the VM at the beginning of a block proposal.
+/// 2. The VM will use this struct to create a special system transaction that will modify the on
+///    chain resource that represents the information of the current block. This transaction can't
+///    be emitted by regular users and is generated by each of the miners on the fly. Such a
+///    transaction will be executed before all of the user-submitted transactions in the block.
+/// 3. Once that special resource is modified, the other user transactions can read the consensus
+///    info by calling into the read method of that resource, which would thus give users the
+///    information such as the current block number.
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, CryptoHasher, CryptoHash)]
+//TODO rename to DagBlockMetadataTransaction
+pub struct DagBlockMetadata {
+    #[serde(skip)]
+    id: Option<HashValue>,
+    /// Parent block hash.
+    parent_hash: Vec<HashValue>,
+    timestamp: u64,
+    author: AccountAddress,
+    author_auth_key: Option<AuthenticationKey>,
+    chain_id: ChainId,
+    parent_gas_used: u64,
+}
+
+impl DagBlockMetadata {
+    pub fn new(
+        parent_hash: Vec<HashValue>,
+        timestamp: u64,
+        author: AccountAddress,
+        author_auth_key: Option<AuthenticationKey>,
+        chain_id: ChainId,
+        parent_gas_used: u64,
+    ) -> Self {
+        let mut txn = Self {
+            id: None,
+            parent_hash,
+            timestamp,
+            author,
+            author_auth_key,
+            chain_id,
+            parent_gas_used,
+        };
+        txn.id = Some(txn.crypto_hash());
+        txn
+    }
+
+    pub fn into_inner(
+        self,
+    ) -> (
+        Vec<HashValue>,
+        u64,
+        AccountAddress,
+        Option<AuthenticationKey>,
+        ChainId,
+        u64,
+    ) {
+        (
+            self.parent_hash,
+            self.timestamp,
+            self.author,
+            self.author_auth_key,
+            self.chain_id,
+            self.parent_gas_used,
+        )
+    }
+
+    pub fn parent_hash(&self) -> Vec<HashValue> {
+        self.parent_hash.clone()
+    }
+
+    pub fn timestamp(&self) -> u64 {
+        self.timestamp
+    }
+
+    pub fn chain_id(&self) -> ChainId {
+        self.chain_id
+    }
+
+    pub fn id(&self) -> HashValue {
+        self.id
+            .expect("DagBlockMetadata's id should be Some after init.")
+    }
+
+    pub fn author(&self) -> AccountAddress {
+        self.author
+    }
+}
+
+impl<'de> Deserialize<'de> for DagBlockMetadata {
+    fn deserialize<D>(deserializer: D) -> Result<Self, <D as Deserializer<'de>>::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        #[serde(rename = "DagBlockMetadata")]
+        struct DagBlockMetadataData {
+            parent_hash: Vec<HashValue>,
+            timestamp: u64,
+            author: AccountAddress,
+            author_auth_key: Option<AuthenticationKey>,
+            chain_id: ChainId,
+            parent_gas_used: u64,
+        }
+        let data = DagBlockMetadataData::deserialize(deserializer)?;
+        Ok(Self::new(
+            data.parent_hash,
+            data.timestamp,
+            data.author,
+            data.author_auth_key,
+            data.chain_id,
+            data.parent_gas_used,
+        ))
+    }
+}
+
+impl Sample for DagBlockMetadata {
+    fn sample() -> Self {
+        Self::new(
+            vec![HashValue::zero()],
+            0,
+            genesis_address(),
+            None,
+            ChainId::test(),
+            0,
+        )
+    }
+}
diff --git a/vm/types/src/lib.rs b/vm/types/src/lib.rs
index ea86f45141..6afe4cff0d 100644
--- a/vm/types/src/lib.rs
+++ b/vm/types/src/lib.rs
@@ -5,6 +5,7 @@
 mod language_storage_ext;
 
 pub mod account_address;
+pub mod dag_block_metadata;
 pub mod gas_schedule;
 pub mod location {
     pub use move_ir_types::location::Loc;