diff --git a/node/src/components/consensus/era_supervisor.rs b/node/src/components/consensus/era_supervisor.rs
index 042d893505..1cb004f0a8 100644
--- a/node/src/components/consensus/era_supervisor.rs
+++ b/node/src/components/consensus/era_supervisor.rs
@@ -31,8 +31,9 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use tracing::{debug, error, info, trace, warn};
 
 use casper_types::{
-    AsymmetricType, BlockHash, BlockHeader, Chainspec, ConsensusProtocolName, Deploy, DeployHash,
-    Digest, DisplayIter, EraId, PublicKey, RewardedSignatures, TimeDiff, Timestamp,
+    AsymmetricType, BlockHash, BlockHeader, Chainspec, ConsensusProtocolName, DeployHash, Digest,
+    DisplayIter, EraId, PublicKey, RewardedSignatures, TimeDiff, Timestamp, Transaction,
+    TransactionHash,
 };
 
 use crate::{
@@ -1359,18 +1360,18 @@ impl SerializedMessage {
     }
 }
 
-async fn get_deploys<REv>(
+async fn get_transactions<REv>(
     effect_builder: EffectBuilder<REv>,
-    hashes: Vec<DeployHash>,
-) -> Option<Vec<Deploy>>
+    hashes: Vec<TransactionHash>,
+) -> Option<Vec<Transaction>>
 where
     REv: From<StorageRequest>,
 {
     effect_builder
-        .get_deploys_from_storage(hashes)
+        .get_transactions_from_storage(hashes)
         .await
         .into_iter()
-        .map(|maybe_deploy| maybe_deploy.map(|deploy| deploy.into_naive()))
+        .map(|maybe_transaction| maybe_transaction.map(|transaction| transaction.into_naive()))
         .collect()
 }
 
@@ -1386,21 +1387,21 @@ async fn execute_finalized_block(
             .store_finalized_approvals(deploy_hash.into(), finalized_approvals.into())
             .await;
     }
-    // Get all deploys in order they appear in the finalized block.
-    let deploys = match get_deploys(
+    // Get all transactions in order they appear in the finalized block.
+    let transactions = match get_transactions(
         effect_builder,
         finalized_block
             .deploy_and_transfer_hashes()
-            .cloned()
+            .map(|deploy_hash| TransactionHash::from(*deploy_hash))
             .collect_vec(),
     )
     .await
     {
-        Some(deploys) => deploys,
+        Some(transactions) => transactions,
         None => {
             fatal!(
                 effect_builder,
-                "Could not fetch deploys and transfers for finalized block: {:?}",
+                "Could not fetch transactions for finalized block: {:?}",
                 finalized_block
             )
             .await;
@@ -1408,6 +1409,13 @@
         }
     };
 
+    let deploys = transactions
+        .into_iter()
+        .filter_map(|txn| match txn {
+            Transaction::Deploy(deploy) => Some(deploy),
+            Transaction::V1(_) => None,
+        })
+        .collect();
     let executable_block =
         ExecutableBlock::from_finalized_block_and_deploys(finalized_block, deploys);
     effect_builder
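For context, the two conversions this hunk leans on can be read in isolation. The sketch below assumes only what the diff itself shows: `TransactionHash: From<DeployHash>` and the two-variant `Transaction` enum; it is illustrative, not code from this PR:

```rust
use casper_types::{Deploy, DeployHash, Transaction, TransactionHash};

/// Widen legacy deploy hashes into the unified transaction-hash type.
fn to_transaction_hashes(deploy_hashes: &[DeployHash]) -> Vec<TransactionHash> {
    deploy_hashes
        .iter()
        .map(|deploy_hash| TransactionHash::from(*deploy_hash))
        .collect()
}

/// Narrow fetched transactions back to deploys. V1 transactions are dropped
/// here because `ExecutableBlock` still only carries deploys at this stage.
fn keep_deploys(transactions: Vec<Transaction>) -> Vec<Deploy> {
    transactions
        .into_iter()
        .filter_map(|txn| match txn {
            Transaction::Deploy(deploy) => Some(deploy),
            Transaction::V1(_) => None,
        })
        .collect()
}
```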
diff --git a/node/src/components/contract_runtime.rs b/node/src/components/contract_runtime.rs
index c6658bb942..97f8361377 100644
--- a/node/src/components/contract_runtime.rs
+++ b/node/src/components/contract_runtime.rs
@@ -46,7 +46,7 @@ use casper_storage::{
 use casper_types::{
     bytesrepr::Bytes, package::PackageKindTag, BlockHash, BlockHeaderV2, Chainspec,
     ChainspecRawBytes, ChainspecRegistry, Digest, EraId, Key, ProtocolVersion, Timestamp,
-    UpgradeConfig, U512,
+    Transaction, UpgradeConfig, U512,
 };
 
 use crate::{
@@ -674,19 +674,25 @@ impl ContractRuntime {
                 }
                 .ignore()
             }
-            ContractRuntimeRequest::SpeculativeDeployExecution {
+            ContractRuntimeRequest::SpeculativelyExecute {
                 execution_prestate,
-                deploy,
+                transaction,
                 responder,
             } => {
+                let deploy_item = match *transaction {
+                    Transaction::Deploy(deploy) => DeployItem::from(deploy),
+                    Transaction::V1(_) => {
+                        return responder
+                            .respond(Err(engine_state::Error::InvalidDeployItemVariant(
+                                "temp error until EE handles transactions".to_string(),
+                            )))
+                            .ignore();
+                    }
+                };
                 let engine_state = Arc::clone(&self.engine_state);
                 async move {
                     let result = run_intensive_task(move || {
-                        execute_only(
-                            engine_state.as_ref(),
-                            execution_prestate,
-                            DeployItem::from((*deploy).clone()),
-                        )
+                        execute_only(engine_state.as_ref(), execution_prestate, deploy_item)
                     })
                     .await;
                     responder.respond(result).await
diff --git a/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs b/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs
index 7d7eb2cdd8..819487b4e0 100644
--- a/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs
+++ b/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs
@@ -26,6 +26,8 @@ impl FetchItem for BlockHeader {
     }
 
     fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> {
+        // No need for further validation. The received header has necessarily had its hash
+        // computed to be the same value we used for the fetch ID if we got here.
         Ok(())
     }
 }
diff --git a/node/src/components/gossiper/tests.rs b/node/src/components/gossiper/tests.rs
index cf84a0b48c..212b1bcd39 100644
--- a/node/src/components/gossiper/tests.rs
+++ b/node/src/components/gossiper/tests.rs
@@ -18,8 +18,8 @@ use tokio::time;
 use tracing::debug;
 
 use casper_types::{
-    testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, Deploy, EraId, FinalitySignature,
-    ProtocolVersion, TimeDiff, Transaction, TransactionV1,
+    testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, EraId, FinalitySignature,
+    ProtocolVersion, TimeDiff, Transaction,
 };
 
 use super::*;
@@ -332,14 +332,6 @@ impl NetworkedReactor for Reactor {
     }
 }
 
-fn random_txn(rng: &mut TestRng) -> Transaction {
-    if rng.gen() {
-        Transaction::from(Deploy::random_valid_native_transfer(rng))
-    } else {
-        Transaction::from(TransactionV1::random(rng))
-    }
-}
-
 fn announce_transaction_received(
     transaction: &Transaction,
 ) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> {
@@ -359,7 +351,7 @@ async fn run_gossip(rng: &mut TestRng, network_size: usize, txn_count: usize) {
 
     // Create `txn_count` random transactions.
     let (all_txn_hashes, mut txns): (BTreeSet<_>, Vec<_>) = iter::repeat_with(|| {
-        let txn = random_txn(rng);
+        let txn = Transaction::random(rng);
         (txn.hash(), txn)
     })
    .take(txn_count)
@@ -420,7 +412,7 @@ async fn should_get_from_alternate_source() {
     let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;
 
     // Create random transaction.
-    let txn = random_txn(rng);
+    let txn = Transaction::random(rng);
     let txn_hash = txn.hash();
 
     // Give the transaction to nodes 0 and 1 to be gossiped.
@@ -498,7 +490,7 @@ async fn should_timeout_gossip_response() {
     let mut node_ids = network.add_nodes(rng, infection_target as usize + 1).await;
 
     // Create random transaction.
-    let txn = random_txn(rng);
+    let txn = Transaction::random(rng);
     let txn_hash = txn.hash();
 
     // Give the transaction to node 0 to be gossiped.
@@ -575,7 +567,7 @@ async fn should_timeout_new_item_from_peer() {
     // received, no component triggers the `ItemReceived` event.
     reactor_0.fake_transaction_acceptor.set_active(false);
 
-    let txn = random_txn(rng);
+    let txn = Transaction::random(rng);
 
     // Give the transaction to node 1 to gossip to node 0.
     network
@@ -631,7 +623,7 @@ async fn should_not_gossip_old_stored_item_again() {
     let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;
     let node_0 = node_ids[0];
 
-    let txn = random_txn(rng);
+    let txn = Transaction::random(rng);
 
     // Store the transaction on node 0.
     let store_txn = |effect_builder: EffectBuilder<Event>| {
@@ -701,7 +693,7 @@ async fn should_ignore_unexpected_message(message_type: Unexpected) {
     let node_ids = network.add_nodes(rng, NETWORK_SIZE).await;
     let node_0 = node_ids[0];
 
-    let txn = Box::new(random_txn(rng));
+    let txn = Box::new(Transaction::random(rng));
 
     let message = match message_type {
         Unexpected::Response => Message::GossipResponse {
diff --git a/node/src/components/rpc_server/http_server.rs b/node/src/components/rpc_server/http_server.rs
index bf9ecc28c4..619c1e9257 100644
--- a/node/src/components/rpc_server/http_server.rs
+++ b/node/src/components/rpc_server/http_server.rs
@@ -5,7 +5,7 @@ use casper_types::ProtocolVersion;
 
 use super::{
     rpcs::{
-        account::PutDeploy,
+        account::{PutDeploy, PutTransaction},
         chain::{
             GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash,
         },
@@ -37,6 +37,7 @@ pub(super) async fn run(
 ) {
     let mut handlers = RequestHandlersBuilder::new();
     PutDeploy::register_as_handler(effect_builder, api_version, &mut handlers);
+    PutTransaction::register_as_handler(effect_builder, api_version, &mut handlers);
     GetBlock::register_as_handler(effect_builder, api_version, &mut handlers);
     GetBlockTransfers::register_as_handler(effect_builder, api_version, &mut handlers);
     GetStateRootHash::register_as_handler(effect_builder, api_version, &mut handlers);
diff --git a/node/src/components/rpc_server/rpcs/account.rs b/node/src/components/rpc_server/rpcs/account.rs
index 3b1e97f70b..a191491964 100644
--- a/node/src/components/rpc_server/rpcs/account.rs
+++ b/node/src/components/rpc_server/rpcs/account.rs
@@ -8,7 +8,7 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use tracing::debug;
 
-use casper_types::{Deploy, DeployHash, ProtocolVersion, Transaction};
+use casper_types::{Deploy, DeployHash, ProtocolVersion, Transaction, TransactionHash};
 
 use super::{
     docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION},
@@ -24,6 +24,14 @@ static PUT_DEPLOY_RESULT: Lazy<PutDeployResult> = Lazy::new(|| PutDeployResult {
     deploy_hash: *Deploy::doc_example().hash(),
 });
 
+static PUT_TRANSACTION_PARAMS: Lazy<PutTransactionParams> = Lazy::new(|| PutTransactionParams {
+    transaction: Transaction::doc_example().clone(),
+});
+static PUT_TRANSACTION_RESULT: Lazy<PutTransactionResult> = Lazy::new(|| PutTransactionResult {
+    api_version: DOCS_EXAMPLE_PROTOCOL_VERSION,
+    transaction_hash: Transaction::doc_example().hash(),
+});
+
 /// Params for "account_put_deploy" RPC request.
 #[derive(Serialize, Deserialize, Debug, JsonSchema)]
 #[serde(deny_unknown_fields)]
@@ -95,3 +103,75 @@ impl RpcWithParams for PutDeploy {
         }
     }
 }
+
+/// Params for "account_put_transaction" RPC request.
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(deny_unknown_fields)]
+pub struct PutTransactionParams {
+    /// The `Transaction`.
+    pub transaction: Transaction,
+}
+
+impl DocExample for PutTransactionParams {
+    fn doc_example() -> &'static Self {
+        &PUT_TRANSACTION_PARAMS
+    }
+}
+
+/// Result for "account_put_transaction" RPC response.
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(deny_unknown_fields)]
+pub struct PutTransactionResult {
+    /// The RPC API version.
+    #[schemars(with = "String")]
+    pub api_version: ProtocolVersion,
+    /// The transaction hash.
+    pub transaction_hash: TransactionHash,
+}
+
+impl DocExample for PutTransactionResult {
+    fn doc_example() -> &'static Self {
+        &PUT_TRANSACTION_RESULT
+    }
+}
+
+/// "account_put_transaction" RPC
+pub struct PutTransaction {}
+
+#[async_trait]
+impl RpcWithParams for PutTransaction {
+    const METHOD: &'static str = "account_put_transaction";
+    type RequestParams = PutTransactionParams;
+    type ResponseResult = PutTransactionResult;
+
+    async fn do_handle_request<REv: ReactorEventT>(
+        effect_builder: EffectBuilder<REv>,
+        api_version: ProtocolVersion,
+        params: Self::RequestParams,
+    ) -> Result<Self::ResponseResult, Error> {
+        let transaction_hash = params.transaction.hash();
+
+        let accept_transaction_result = effect_builder
+            .try_accept_transaction(params.transaction, None)
+            .await;
+
+        match accept_transaction_result {
+            Ok(_) => {
+                debug!(%transaction_hash, "transaction was stored");
+                let result = Self::ResponseResult {
+                    api_version,
+                    transaction_hash,
+                };
+                Ok(result)
+            }
+            Err(error) => {
+                debug!(
+                    %transaction_hash,
+                    %error,
+                    "the transaction submitted by the client was invalid",
+                );
+                Err(Error::new(ErrorCode::InvalidTransaction, error.to_string()))
+            }
+        }
+    }
+}
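A client-side sketch of the new endpoint, built with `serde_json`. The envelope is standard JSON-RPC 2.0 and the method name matches `PutTransaction::METHOD` above; the exact `Transaction` JSON shape comes from `casper_types` serialization and is deliberately left as an input here:

```rust
use serde_json::{json, Value};

/// Build an `account_put_transaction` request body (illustrative only).
fn put_transaction_request(transaction_json: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "account_put_transaction",
        // `PutTransactionParams` has a single `transaction` field.
        "params": { "transaction": transaction_json },
    })
}
```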
diff --git a/node/src/components/rpc_server/rpcs/docs.rs b/node/src/components/rpc_server/rpcs/docs.rs
index f883f67e19..c0accc8791 100644
--- a/node/src/components/rpc_server/rpcs/docs.rs
+++ b/node/src/components/rpc_server/rpcs/docs.rs
@@ -13,11 +13,11 @@ use serde_json::{json, Value};
 use casper_types::ProtocolVersion;
 
 use super::{
-    account::PutDeploy,
+    account::{PutDeploy, PutTransaction},
     chain::{
         GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetEraSummary, GetStateRootHash,
     },
-    info::{GetChainspec, GetDeploy, GetPeers, GetStatus, GetValidatorChanges},
+    info::{GetChainspec, GetDeploy, GetPeers, GetStatus, GetTransaction, GetValidatorChanges},
     state::{
         GetAccountInfo, GetAuctionInfo, GetBalance, GetDictionaryItem, GetItem, QueryBalance,
         QueryGlobalState,
@@ -65,8 +65,16 @@ pub(crate) static OPEN_RPC_SCHEMA: Lazy<OpenRpcSchema> = Lazy::new(|| {
         },
     };
 
-    schema.push_with_params::<PutDeploy>("receives a Deploy to be executed by the network");
-    schema.push_with_params::<GetDeploy>("returns a Deploy from the network");
+    schema.push_with_params::<PutDeploy>(
+        "receives a Deploy to be executed by the network (DEPRECATED: use \
+        `account_put_transaction` instead)",
+    );
+    schema
+        .push_with_params::<PutTransaction>("receives a Transaction to be executed by the network");
+    schema.push_with_params::<GetDeploy>(
+        "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)",
+    );
+    schema.push_with_params::<GetTransaction>("returns a Transaction from the network");
     schema.push_with_params::<GetAccountInfo>("returns an Account from the network");
     schema.push_with_params::<GetDictionaryItem>("returns an item from a Dictionary");
     schema.push_with_params::<QueryGlobalState>(
@@ -457,7 +465,7 @@ impl RpcWithoutParams for ListRpcs {
 
 mod doc_example_impls {
     use casper_types::{
-        account::Account, Deploy, EraEndV1, EraEndV2, EraReport, PublicKey, Timestamp,
+        account::Account, Deploy, EraEndV1, EraEndV2, EraReport, PublicKey, Timestamp, Transaction,
     };
 
     use super::DocExample;
@@ -468,6 +476,12 @@ mod doc_example_impls {
         }
     }
 
+    impl DocExample for Transaction {
+        fn doc_example() -> &'static Self {
+            Transaction::example()
+        }
+    }
+
     impl DocExample for Account {
         fn doc_example() -> &'static Self {
             Account::example()
diff --git a/node/src/components/rpc_server/rpcs/error_code.rs b/node/src/components/rpc_server/rpcs/error_code.rs
index ef22174fae..eb659c3143 100644
--- a/node/src/components/rpc_server/rpcs/error_code.rs
+++ b/node/src/components/rpc_server/rpcs/error_code.rs
@@ -37,6 +37,12 @@ pub enum ErrorCode {
     NoSuchStateRoot = -32012,
     /// The main purse for a given account hash does not exist.
     NoSuchMainPurse = -32013,
+    /// The requested Transaction was not found.
+    NoSuchTransaction = -32014,
+    /// Variant mismatch.
+    VariantMismatch = -32015,
+    /// The given Transaction cannot be executed as it is invalid.
+    InvalidTransaction = -32016,
 }
 
 impl From<ErrorCode> for (i64, &'static str) {
@@ -62,6 +68,9 @@ impl From<ErrorCode> for (i64, &'static str) {
             ErrorCode::FailedToGetTrie => (error_code as i64, "Failed to get trie"),
             ErrorCode::NoSuchStateRoot => (error_code as i64, "No such state root"),
             ErrorCode::NoSuchMainPurse => (error_code as i64, "Failed to get main purse"),
+            ErrorCode::NoSuchTransaction => (error_code as i64, "No such transaction"),
+            ErrorCode::VariantMismatch => (error_code as i64, "Variant mismatch internal error"),
+            ErrorCode::InvalidTransaction => (error_code as i64, "Invalid transaction"),
         }
     }
 }
diff --git a/node/src/components/rpc_server/rpcs/info.rs b/node/src/components/rpc_server/rpcs/info.rs
index 7630aedb0e..033bb14c82 100644
--- a/node/src/components/rpc_server/rpcs/info.rs
+++ b/node/src/components/rpc_server/rpcs/info.rs
@@ -6,11 +6,12 @@ use async_trait::async_trait;
 use once_cell::sync::Lazy;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use tracing::info;
+use tracing::{debug, error};
 
 use casper_types::{
     execution::{ExecutionResult, ExecutionResultV2},
-    Block, ChainspecRawBytes, Deploy, DeployHash, EraId, ProtocolVersion, PublicKey,
+    Block, ChainspecRawBytes, Deploy, DeployHash, EraId, ProtocolVersion, PublicKey, Transaction,
+    TransactionHash,
 };
 
 use super::{
@@ -21,7 +22,10 @@ use crate::{
     components::consensus::ValidatorChange,
     effect::EffectBuilder,
     reactor::QueueKind,
-    types::{DeployExecutionInfo, GetStatusResult, PeersMap},
+    types::{
+        DeployWithFinalizedApprovals, ExecutionInfo, GetStatusResult, PeersMap,
+        TransactionWithFinalizedApprovals, VariantMismatch,
+    },
 };
 
 static GET_DEPLOY_PARAMS: Lazy<GetDeployParams> = Lazy::new(|| GetDeployParams {
@@ -31,7 +35,20 @@ static GET_DEPLOY_PARAMS: Lazy<GetDeployParams> = Lazy::new(|| GetDeployParams {
 static GET_DEPLOY_RESULT: Lazy<GetDeployResult> = Lazy::new(|| GetDeployResult {
     api_version: DOCS_EXAMPLE_PROTOCOL_VERSION,
     deploy: Deploy::doc_example().clone(),
-    execution_info: Some(DeployExecutionInfo {
+    execution_info: Some(ExecutionInfo {
+        block_hash: *Block::example().hash(),
+        block_height: Block::example().clone_header().height(),
+        execution_result: Some(ExecutionResult::from(ExecutionResultV2::example().clone())),
+    }),
+});
+static GET_TRANSACTION_PARAMS: Lazy<GetTransactionParams> = Lazy::new(|| GetTransactionParams {
+    transaction_hash: Transaction::doc_example().hash(),
+    finalized_approvals: true,
+});
+static GET_TRANSACTION_RESULT: Lazy<GetTransactionResult> = Lazy::new(|| GetTransactionResult {
+    api_version: DOCS_EXAMPLE_PROTOCOL_VERSION,
+    transaction: Transaction::doc_example().clone(),
+    execution_info: Some(ExecutionInfo {
         block_hash: *Block::example().hash(),
         block_height: Block::example().height(),
         execution_result: Some(ExecutionResult::from(ExecutionResultV2::example().clone())),
@@ -67,7 +84,8 @@ pub struct GetDeployParams {
     pub finalized_approvals: bool,
 }
 
-/// The default for `GetDeployParams::finalized_approvals`.
+/// The default for `GetDeployParams::finalized_approvals` and
+/// `GetTransactionParams::finalized_approvals`.
 fn finalized_approvals_default() -> bool {
     false
 }
@@ -89,7 +107,7 @@ pub struct GetDeployResult {
     pub deploy: Deploy,
     /// Execution info, if available.
     #[serde(skip_serializing_if = "Option::is_none", flatten)]
-    pub execution_info: Option<DeployExecutionInfo>,
+    pub execution_info: Option<ExecutionInfo>,
 }
 
 impl DocExample for GetDeployResult {
@@ -107,34 +125,132 @@ impl RpcWithParams for GetDeploy {
     type RequestParams = GetDeployParams;
     type ResponseResult = GetDeployResult;
 
+    async fn do_handle_request<REv: ReactorEventT>(
+        effect_builder: EffectBuilder<REv>,
+        api_version: ProtocolVersion,
+        params: Self::RequestParams,
+    ) -> Result<Self::ResponseResult, Error> {
+        let txn_hash = TransactionHash::from(params.deploy_hash);
+        let (txn_with_finalized_approvals, execution_info) = match effect_builder
+            .get_transaction_and_execution_info_from_storage(txn_hash)
+            .await
+        {
+            Some(value) => value,
+            None => {
+                let message = format!(
+                    "failed to get {} and execution info from storage",
+                    params.deploy_hash
+                );
+                debug!("{}", message);
+                return Err(Error::new(ErrorCode::NoSuchDeploy, message));
+            }
+        };
+
+        let deploy_with_finalized_approvals = match txn_with_finalized_approvals {
+            TransactionWithFinalizedApprovals::Deploy {
+                deploy,
+                finalized_approvals,
+            } => DeployWithFinalizedApprovals::new(deploy, finalized_approvals),
+            other => {
+                let message = format!(
+                    "internal error: failed to get {} and execution info from storage: {}",
+                    params.deploy_hash,
+                    VariantMismatch(Box::new((txn_hash, other)))
+                );
+                error!("{}", message);
+                return Err(Error::new(ErrorCode::VariantMismatch, message));
+            }
+        };
+
+        let deploy = if params.finalized_approvals {
+            deploy_with_finalized_approvals.into_naive()
+        } else {
+            deploy_with_finalized_approvals.discard_finalized_approvals()
+        };
+        Ok(Self::ResponseResult {
+            api_version,
+            deploy,
+            execution_info,
+        })
+    }
+}
+
+/// Params for "info_get_transaction" RPC request.
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(deny_unknown_fields)]
+pub struct GetTransactionParams {
+    /// The transaction hash.
+    pub transaction_hash: TransactionHash,
+    /// Whether to return the transaction with the finalized approvals substituted. If `false` or
+    /// omitted, returns the transaction with the approvals that were originally received by the
+    /// node.
+    #[serde(default = "finalized_approvals_default")]
+    pub finalized_approvals: bool,
+}
+
+impl DocExample for GetTransactionParams {
+    fn doc_example() -> &'static Self {
+        &GET_TRANSACTION_PARAMS
+    }
+}
+
+/// Result for "info_get_transaction" RPC response.
+#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(deny_unknown_fields)]
+pub struct GetTransactionResult {
+    /// The RPC API version.
+    #[schemars(with = "String")]
+    pub api_version: ProtocolVersion,
+    /// The transaction.
+    pub transaction: Transaction,
+    /// Execution info, if available.
+    #[serde(skip_serializing_if = "Option::is_none", flatten)]
+    pub execution_info: Option<ExecutionInfo>,
+}
+
+impl DocExample for GetTransactionResult {
+    fn doc_example() -> &'static Self {
+        &GET_TRANSACTION_RESULT
+    }
+}
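One detail worth calling out: `execution_info` is marked `#[serde(flatten)]`, so its fields are inlined into the result object rather than nested under an `execution_info` key. A sketch of the resulting JSON shape (all values illustrative):

```rust
use serde_json::{json, Value};

/// Approximate shape of a successful `info_get_transaction` response.
/// `block_hash`, `block_height` and `execution_result` sit directly inside
/// `result` because of the `flatten` attribute on `execution_info`.
fn example_get_transaction_response() -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "result": {
            "api_version": "2.0.0",
            "transaction": {},      // Transaction JSON elided.
            "block_hash": "ab..",   // Hex-encoded digest in practice.
            "block_height": 42,
            "execution_result": {}, // ExecutionResult JSON elided.
        },
    })
}
```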
+/// "info_get_transaction" RPC.
+pub struct GetTransaction {}
+
+#[async_trait]
+impl RpcWithParams for GetTransaction {
+    const METHOD: &'static str = "info_get_transaction";
+    type RequestParams = GetTransactionParams;
+    type ResponseResult = GetTransactionResult;
+
     async fn do_handle_request<REv: ReactorEventT>(
         effect_builder: EffectBuilder<REv>,
         api_version: ProtocolVersion,
         params: Self::RequestParams,
     ) -> Result<Self::ResponseResult, Error> {
         match effect_builder
-            .get_deploy_and_execution_info_from_storage(params.deploy_hash)
+            .get_transaction_and_execution_info_from_storage(params.transaction_hash)
             .await
         {
-            Some((deploy_with_finalized_approvals, execution_info)) => {
-                let deploy = if params.finalized_approvals {
-                    deploy_with_finalized_approvals.into_naive()
+            Some((txn_with_finalized_approvals, execution_info)) => {
+                let transaction = if params.finalized_approvals {
+                    txn_with_finalized_approvals.into_naive()
                 } else {
-                    deploy_with_finalized_approvals.discard_finalized_approvals()
+                    txn_with_finalized_approvals.discard_finalized_approvals()
                 };
                 Ok(Self::ResponseResult {
                     api_version,
-                    deploy,
+                    transaction,
                     execution_info,
                 })
             }
             None => {
                 let message = format!(
                     "failed to get {} and execution info from storage",
-                    params.deploy_hash
+                    params.transaction_hash
                 );
-                info!("{}", message);
-                Err(Error::new(ErrorCode::NoSuchDeploy, message))
+                debug!("{}", message);
+                Err(Error::new(ErrorCode::NoSuchTransaction, message))
             }
         }
     }
 }
diff --git a/node/src/components/rpc_server/rpcs/speculative_exec.rs b/node/src/components/rpc_server/rpcs/speculative_exec.rs
index af8f9ff8fe..81b3297f1f 100644
--- a/node/src/components/rpc_server/rpcs/speculative_exec.rs
+++ b/node/src/components/rpc_server/rpcs/speculative_exec.rs
@@ -1,6 +1,6 @@
 //! RPC related to speculative execution.
 
-use std::{str, sync::Arc};
+use std::str;
 
 use async_trait::async_trait;
 use once_cell::sync::Lazy;
@@ -19,48 +19,94 @@ use super::{
 };
 use crate::{components::contract_runtime::SpeculativeExecutionState, effect::EffectBuilder};
 
+static SPECULATIVE_EXEC_TXN_PARAMS: Lazy<SpeculativeExecTxnParams> =
+    Lazy::new(|| SpeculativeExecTxnParams {
+        block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())),
+        transaction: Transaction::doc_example().clone(),
+    });
+static SPECULATIVE_EXEC_TXN_RESULT: Lazy<SpeculativeExecTxnResult> =
+    Lazy::new(|| SpeculativeExecTxnResult {
+        api_version: DOCS_EXAMPLE_PROTOCOL_VERSION,
+        block_hash: *BlockHash::example(),
+        execution_result: ExecutionResultV2::example().clone(),
+    });
 static SPECULATIVE_EXEC_PARAMS: Lazy<SpeculativeExecParams> = Lazy::new(|| SpeculativeExecParams {
     block_identifier: Some(BlockIdentifier::Hash(*BlockHash::example())),
     deploy: Deploy::doc_example().clone(),
 });
-static SPECULATIVE_EXEC_RESULT: Lazy<SpeculativeExecResult> = Lazy::new(|| SpeculativeExecResult {
-    api_version: DOCS_EXAMPLE_PROTOCOL_VERSION,
-    block_hash: *BlockHash::example(),
-    execution_result: ExecutionResultV2::example().clone(),
-});
 
-/// Params for "speculative_exec" RPC request.
+/// Params for "speculative_exec_txn" RPC request.
 #[derive(Serialize, Deserialize, Debug, JsonSchema)]
 #[serde(deny_unknown_fields)]
-pub struct SpeculativeExecParams {
-    /// Block hash on top of which to execute the deploy.
+pub struct SpeculativeExecTxnParams {
+    /// Block hash on top of which to execute the transaction.
     pub block_identifier: Option<BlockIdentifier>,
-    /// Deploy to execute.
-    pub deploy: Deploy,
+    /// Transaction to execute.
+    pub transaction: Transaction,
 }
 
-impl DocExample for SpeculativeExecParams {
+impl DocExample for SpeculativeExecTxnParams {
     fn doc_example() -> &'static Self {
-        &SPECULATIVE_EXEC_PARAMS
+        &SPECULATIVE_EXEC_TXN_PARAMS
     }
 }
 
-/// Result for "speculative_exec" RPC response.
+/// Result for "speculative_exec_txn" and "speculative_exec" RPC responses.
 #[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)]
 #[serde(deny_unknown_fields)]
-pub struct SpeculativeExecResult {
+pub struct SpeculativeExecTxnResult {
     /// The RPC API version.
     #[schemars(with = "String")]
     pub api_version: ProtocolVersion,
-    /// Hash of the block on top of which the deploy was executed.
+    /// Hash of the block on top of which the transaction was executed.
     pub block_hash: BlockHash,
     /// Result of the execution.
     pub execution_result: ExecutionResultV2,
 }
 
-impl DocExample for SpeculativeExecResult {
+impl DocExample for SpeculativeExecTxnResult {
     fn doc_example() -> &'static Self {
-        &SPECULATIVE_EXEC_RESULT
+        &SPECULATIVE_EXEC_TXN_RESULT
+    }
+}
+
+/// "speculative_exec_txn" RPC
+pub struct SpeculativeExecTxn {}
+
+#[async_trait]
+impl RpcWithParams for SpeculativeExecTxn {
+    const METHOD: &'static str = "speculative_exec_txn";
+    type RequestParams = SpeculativeExecTxnParams;
+    type ResponseResult = SpeculativeExecTxnResult;
+
+    async fn do_handle_request<REv: ReactorEventT>(
+        effect_builder: EffectBuilder<REv>,
+        api_version: ProtocolVersion,
+        params: Self::RequestParams,
+    ) -> Result<Self::ResponseResult, Error> {
+        handle_request(
+            effect_builder,
+            api_version,
+            params.block_identifier,
+            params.transaction,
+        )
+        .await
+    }
+}
+
+/// Params for "speculative_exec" RPC request.
+#[derive(Serialize, Deserialize, Debug, JsonSchema)]
+#[serde(deny_unknown_fields)]
+pub struct SpeculativeExecParams {
+    /// Block hash on top of which to execute the deploy.
+    pub block_identifier: Option<BlockIdentifier>,
+    /// Deploy to execute.
+    pub deploy: Deploy,
+}
+
+impl DocExample for SpeculativeExecParams {
+    fn doc_example() -> &'static Self {
+        &SPECULATIVE_EXEC_PARAMS
     }
 }
 
@@ -71,104 +117,112 @@ pub struct SpeculativeExec {}
 impl RpcWithParams for SpeculativeExec {
     const METHOD: &'static str = "speculative_exec";
     type RequestParams = SpeculativeExecParams;
-    type ResponseResult = SpeculativeExecResult;
+    type ResponseResult = SpeculativeExecTxnResult;
 
     async fn do_handle_request<REv: ReactorEventT>(
         effect_builder: EffectBuilder<REv>,
         api_version: ProtocolVersion,
         params: Self::RequestParams,
     ) -> Result<Self::ResponseResult, Error> {
-        let SpeculativeExecParams {
-            block_identifier: maybe_block_id,
-            deploy,
-        } = params;
-        let only_from_available_block_range = true;
-
-        let block = common::get_block(
-            maybe_block_id,
-            only_from_available_block_range,
+        handle_request(
             effect_builder,
+            api_version,
+            params.block_identifier,
+            Transaction::from(params.deploy),
         )
-        .await?;
-        let block_hash = *block.hash();
-        let execution_prestate = SpeculativeExecutionState {
-            state_root_hash: *block.state_root_hash(),
-            block_time: block.timestamp(),
-            protocol_version: block.protocol_version(),
-        };
-
-        let accept_transaction_result = effect_builder
-            .try_accept_transaction(
-                Transaction::from(deploy.clone()),
-                Some(Box::new(block.take_header())),
-            )
-            .await;
-
-        if let Err(error) = accept_transaction_result {
-            return Err(Error::new(ErrorCode::InvalidDeploy, error.to_string()));
-        }
+        .await
+    }
+}
 
-        let result = effect_builder
-            .speculative_execute_deploy(execution_prestate, Arc::new(deploy))
-            .await;
-
-        match result {
-            Ok(Some(execution_result)) => {
-                let result = Self::ResponseResult {
-                    api_version,
-                    block_hash,
-                    execution_result,
-                };
-                Ok(result)
-            }
-            Ok(None) => Err(Error::new(
-                ErrorCode::NoSuchBlock,
-                "block hash not found".to_string(),
-            )),
-            Err(error) => {
-                let rpc_error = match error {
-                    EngineStateError::RootNotFound(_) => Error::new(ErrorCode::NoSuchStateRoot, ""),
-                    EngineStateError::WasmPreprocessing(error) => {
-                        Error::new(ErrorCode::InvalidDeploy, error.to_string())
-                    }
-                    EngineStateError::InvalidDeployItemVariant(error) => {
-                        Error::new(ErrorCode::InvalidDeploy, error)
-                    }
-                    EngineStateError::InvalidProtocolVersion(_) => Error::new(
-                        ErrorCode::InvalidDeploy,
-                        format!("deploy used invalid protocol version {}", error),
-                    ),
-                    EngineStateError::Deploy => Error::new(ErrorCode::InvalidDeploy, ""),
-                    EngineStateError::Genesis(_)
-                    | EngineStateError::WasmSerialization(_)
-                    | EngineStateError::Exec(_)
-                    | EngineStateError::Storage(_)
-                    | EngineStateError::Authorization
-                    | EngineStateError::InsufficientPayment
-                    | EngineStateError::GasConversionOverflow
-                    | EngineStateError::Finalization
-                    | EngineStateError::Bytesrepr(_)
-                    | EngineStateError::Mint(_)
-                    | EngineStateError::InvalidKeyVariant
-                    | EngineStateError::ProtocolUpgrade(_)
-                    | EngineStateError::CommitError(_)
-                    | EngineStateError::MissingSystemContractRegistry
-                    | EngineStateError::MissingSystemContractHash(_)
-                    | EngineStateError::RuntimeStackOverflow
-                    | EngineStateError::FailedToGetKeys(_)
-                    | EngineStateError::FailedToGetStoredWithdraws
-                    | EngineStateError::FailedToGetWithdrawPurses
-                    | EngineStateError::FailedToRetrieveUnbondingDelay
-                    | EngineStateError::FailedToRetrieveEraId => {
-                        Error::new(ReservedErrorCode::InternalError, error.to_string())
-                    }
-                    _ => Error::new(
-                        ReservedErrorCode::InternalError,
-                        format!("Unhandled engine state error: {}", error),
-                    ),
-                };
-                Err(rpc_error)
-            }
+async fn handle_request<REv: ReactorEventT>(
+    effect_builder: EffectBuilder<REv>,
+    api_version: ProtocolVersion,
+    maybe_block_id: Option<BlockIdentifier>,
+    transaction: Transaction,
+) -> Result<SpeculativeExecTxnResult, Error> {
+    let only_from_available_block_range = true;
+
+    let block = common::get_block(
+        maybe_block_id,
+        only_from_available_block_range,
+        effect_builder,
+    )
+    .await?;
+    let block_hash = *block.hash();
+    let execution_prestate = SpeculativeExecutionState {
+        state_root_hash: *block.state_root_hash(),
+        block_time: block.timestamp(),
+        protocol_version: block.protocol_version(),
+    };
+
+    let accept_transaction_result = effect_builder
+        .try_accept_transaction(transaction.clone(), Some(Box::new(block.take_header())))
+        .await;
+
+    if let Err(error) = accept_transaction_result {
+        return Err(Error::new(ErrorCode::InvalidTransaction, error.to_string()));
+    }
+
+    let result = effect_builder
+        .speculatively_execute(execution_prestate, Box::new(transaction))
+        .await;
+
+    match result {
+        Ok(Some(execution_result)) => {
+            let result = SpeculativeExecTxnResult {
+                api_version,
+                block_hash,
+                execution_result,
+            };
+            Ok(result)
+        }
+        Ok(None) => Err(Error::new(
+            ErrorCode::NoSuchBlock,
+            "block hash not found".to_string(),
+        )),
+        Err(error) => {
+            let rpc_error = match error {
+                EngineStateError::RootNotFound(_) => Error::new(ErrorCode::NoSuchStateRoot, ""),
+                EngineStateError::WasmPreprocessing(error) => {
+                    Error::new(ErrorCode::InvalidDeploy, error.to_string())
+                }
+                EngineStateError::InvalidDeployItemVariant(error) => {
+                    Error::new(ErrorCode::InvalidDeploy, error)
+                }
+                EngineStateError::InvalidProtocolVersion(_) => Error::new(
+                    ErrorCode::InvalidDeploy,
+                    format!("deploy used invalid protocol version {}", error),
+                ),
+                EngineStateError::Deploy => Error::new(ErrorCode::InvalidDeploy, ""),
+                EngineStateError::Genesis(_)
+                | EngineStateError::WasmSerialization(_)
+                | EngineStateError::Exec(_)
+                | EngineStateError::Storage(_)
+                | EngineStateError::Authorization
+                | EngineStateError::InsufficientPayment
+                | EngineStateError::GasConversionOverflow
+                | EngineStateError::Finalization
+                | EngineStateError::Bytesrepr(_)
+                | EngineStateError::Mint(_)
+                | EngineStateError::InvalidKeyVariant
+                | EngineStateError::ProtocolUpgrade(_)
+                | EngineStateError::CommitError(_)
+                | EngineStateError::MissingSystemContractRegistry
+                | EngineStateError::MissingSystemContractHash(_)
+                | EngineStateError::RuntimeStackOverflow
+                | EngineStateError::FailedToGetKeys(_)
+                | EngineStateError::FailedToGetStoredWithdraws
+                | EngineStateError::FailedToGetWithdrawPurses
+                | EngineStateError::FailedToRetrieveUnbondingDelay
+                | EngineStateError::FailedToRetrieveEraId => {
+                    Error::new(ReservedErrorCode::InternalError, error.to_string())
+                }
+                _ => Error::new(
+                    ReservedErrorCode::InternalError,
+                    format!("Unhandled engine state error: {}", error),
+                ),
+            };
+            Err(rpc_error)
        }
    }
}
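As with `account_put_transaction`, a client-side sketch of the new endpoint. The envelope is standard JSON-RPC 2.0; `block_identifier` is an `Option`, so it may be `null`, in which case `common::get_block` resolves the block for the node. The `Transaction` JSON itself is elided:

```rust
use serde_json::{json, Value};

/// Build a `speculative_exec_txn` request body (illustrative only).
fn speculative_exec_txn_request(transaction_json: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "speculative_exec_txn",
        "params": {
            // Optionally pin the pre-state to a specific block hash.
            "block_identifier": null,
            "transaction": transaction_json,
        },
    })
}
```

Note that the deprecated `speculative_exec` now delegates to the same `handle_request` by wrapping its `Deploy` in `Transaction::from`, so both methods return the shared `SpeculativeExecTxnResult` shape.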
diff --git a/node/src/components/rpc_server/speculative_exec_server.rs b/node/src/components/rpc_server/speculative_exec_server.rs
index 002f8761ac..ab5b6235c4 100644
--- a/node/src/components/rpc_server/speculative_exec_server.rs
+++ b/node/src/components/rpc_server/speculative_exec_server.rs
@@ -6,7 +6,10 @@ use casper_types::ProtocolVersion;
 use super::ReactorEventT;
 use crate::{
     effect::EffectBuilder,
-    rpcs::{speculative_exec::SpeculativeExec, RpcWithParams},
+    rpcs::{
+        speculative_exec::{SpeculativeExec, SpeculativeExecTxn},
+        RpcWithParams,
+    },
 };
 
 /// The URL path for all JSON-RPC requests.
@@ -24,6 +27,7 @@ pub(super) async fn run(
     cors_origin: String,
 ) {
     let mut handlers = RequestHandlersBuilder::new();
+    SpeculativeExecTxn::register_as_handler(effect_builder, api_version, &mut handlers);
     SpeculativeExec::register_as_handler(effect_builder, api_version, &mut handlers);
     let handlers = handlers.build();
 
diff --git a/node/src/components/storage.rs b/node/src/components/storage.rs
index c9360c4587..53ac3230f7 100644
--- a/node/src/components/storage.rs
+++ b/node/src/components/storage.rs
@@ -30,59 +30,53 @@
 //! The storage component itself is panic free and in general reports three classes of errors:
 //! Corruption, temporary resource exhaustion and potential bugs.
 
+mod config;
 mod deploy_metadata_v1;
 pub(crate) mod disjoint_sequences;
 mod error;
+mod event;
 mod indices;
 mod lmdb_ext;
 mod metrics;
 mod object_pool;
 #[cfg(test)]
 mod tests;
-mod write_block;
+mod versioned_databases;
 
 #[cfg(test)]
 use std::collections::BTreeSet;
 use std::{
     borrow::Cow,
-    collections::{BTreeMap, HashMap, HashSet},
-    convert::{TryFrom, TryInto},
+    collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+    convert::TryInto,
     fmt::{self, Display, Formatter},
     fs::{self, OpenOptions},
     io::ErrorKind,
-    iter, mem,
     path::{Path, PathBuf},
     rc::Rc,
     sync::Arc,
 };
 
 use datasize::DataSize;
-use derive_more::From;
 use itertools::Itertools;
 use lmdb::{
-    Cursor, Database, DatabaseFlags, Environment, EnvironmentFlags, RwTransaction,
+    Database, DatabaseFlags, Environment, EnvironmentFlags, RwCursor, RwTransaction,
     Transaction as LmdbTransaction, WriteFlags,
 };
 use prometheus::Registry;
-use serde::{Deserialize, Serialize};
 use smallvec::SmallVec;
-use static_assertions::const_assert;
-#[cfg(test)]
-use tempfile::TempDir;
 use thiserror::Error;
 use tracing::{debug, error, info, trace, warn};
 
-#[cfg(test)]
-use casper_types::bytesrepr;
 use casper_types::{
     bytesrepr::{FromBytes, ToBytes},
     execution::{
         execution_result_v1, ExecutionResult, ExecutionResultV1, ExecutionResultV2, TransformKind,
     },
-    Block, BlockBody, BlockBodyV1, BlockHash, BlockHashAndHeight, BlockHeader, BlockHeaderV1,
-    BlockSignatures, BlockV2, Deploy, DeployApprovalsHash, DeployConfigurationFailure, DeployHash,
-    DeployHeader, Digest, EraId, FinalitySignature, ProtocolVersion, PublicKey, SignedBlockHeader,
-    StoredValue, Timestamp, Transaction, TransactionApprovalsHash, TransactionHash, TransactionId,
+    Block, BlockBody, BlockHash, BlockHashAndHeight, BlockHeader, BlockSignatures, BlockV2, Deploy,
+    DeployApprovalsHash, DeployConfigurationFailure, DeployHash, DeployHeader, Digest, EraId,
+    FinalitySignature, ProtocolVersion, PublicKey, SignedBlockHeader, StoredValue, Timestamp,
+    Transaction, TransactionApprovalsHash, TransactionHash, TransactionId,
     TransactionV1ApprovalsHash, TransactionV1ConfigFailure, Transfer,
 };
 
@@ -94,29 +88,29 @@ use crate::{
     effect::{
         announcements::FatalAnnouncement,
         incoming::{NetRequest, NetRequestIncoming},
-        requests::{
-            MakeBlockExecutableRequest, MarkBlockCompletedRequest, NetworkRequest, StorageRequest,
-        },
+        requests::{MarkBlockCompletedRequest, NetworkRequest, StorageRequest},
         EffectBuilder, EffectExt, Effects,
     },
     fatal,
     protocol::Message,
     types::{
         ApprovalsHashes, AvailableBlockRange, BlockExecutionResultsOrChunk,
-        BlockExecutionResultsOrChunkId, BlockWithMetadata, DeployExecutionInfo,
-        DeployWithFinalizedApprovals, ExecutableBlock, FinalizedApprovals,
-        FinalizedDeployApprovals, LegacyDeploy, MaxTtl, NodeId, NodeRng, SignedBlock, SyncLeap,
-        SyncLeapIdentifier, TransactionWithFinalizedApprovals,
+        BlockExecutionResultsOrChunkId, BlockWithMetadata, ExecutableBlock, ExecutionInfo,
+        FinalizedApprovals, LegacyDeploy, MaxTtl, NodeId, NodeRng, SignedBlock, SyncLeap,
+        SyncLeapIdentifier, TransactionWithFinalizedApprovals, VariantMismatch,
     },
     utils::{display_error, WithDir},
 };
+pub use config::Config;
 use deploy_metadata_v1::DeployMetadataV1;
 use disjoint_sequences::{DisjointSequences, Sequence};
+pub use error::FatalStorageError;
 use error::GetRequestError;
-pub use error::{FatalStorageError, VariantMismatch};
+pub(crate) use event::Event;
 use lmdb_ext::{BytesreprError, LmdbExtError, TransactionExt, WriteTransactionExt};
 use metrics::Metrics;
 use object_pool::ObjectPool;
+use versioned_databases::VersionedDatabases;
 
 const COMPONENT_NAME: &str = "storage";
 
@@ -127,17 +121,6 @@ const STORAGE_DB_FILENAME: &str = "storage.lmdb";
 /// one time.
 const MAX_TRANSACTIONS: u32 = 1;
 
-/// One Gibibyte.
-const GIB: usize = 1024 * 1024 * 1024;
-
-/// Default max block store size.
-const DEFAULT_MAX_BLOCK_STORE_SIZE: usize = 450 * GIB;
-/// Default max deploy store size.
-const DEFAULT_MAX_DEPLOY_STORE_SIZE: usize = 300 * GIB;
-/// Default max deploy metadata store size.
-const DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE: usize = 300 * GIB;
-/// Default max state store size.
-const DEFAULT_MAX_STATE_STORE_SIZE: usize = 10 * GIB;
 /// Maximum number of allowed dbs.
 const MAX_DB_COUNT: u32 = 14;
 /// Key under which completed blocks are to be stored.
@@ -154,8 +137,6 @@ const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::WRITE_MAP;
 /// Mac OS X exhibits performance regressions when `WRITE_MAP` is used.
 #[cfg(target_os = "macos")]
 const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::empty();
-const _STORAGE_EVENT_SIZE: usize = mem::size_of::<Event>();
-const_assert!(_STORAGE_EVENT_SIZE <= 32);
 
 const STORAGE_FILES: [&str; 5] = [
     "data.lmdb",
@@ -165,16 +146,6 @@ const STORAGE_FILES: [&str; 5] = [
     "sse_index",
 ];
 
-#[derive(DataSize, Debug)]
-struct VersionedDatabases {
-    /// The legacy block body database, storing [BlockBodyV1] objects.
-    #[data_size(skip)]
-    legacy: Database,
-    /// The current block body database, storing [BlockBody] objects.
-    #[data_size(skip)]
-    current: Database,
-}
-
 /// The storage component.
 #[derive(DataSize, Debug)]
 pub struct Storage {
@@ -184,47 +155,34 @@ pub struct Storage {
     #[data_size(skip)]
     env: Rc<Environment>,
     /// The block header databases.
-    #[data_size(skip)]
-    block_header_dbs: VersionedDatabases,
+    block_header_dbs: VersionedDatabases<BlockHash, BlockHeader>,
     /// The block body databases.
-    #[data_size(skip)]
-    block_body_dbs: VersionedDatabases,
+    block_body_dbs: VersionedDatabases<Digest, BlockBody>,
     /// The approvals hashes database.
     #[data_size(skip)]
     approvals_hashes_db: Database,
     /// The block metadata db.
     #[data_size(skip)]
     block_metadata_db: Database,
-    /// The deploy database.
-    #[data_size(skip)]
-    deploy_db: Database,
-    /// The transaction database.
-    #[data_size(skip)]
-    transaction_db: Database,
-    /// Database of `ExecutionResultV1`s indexed by deploy hash.
-    #[data_size(skip)]
-    execution_results_v1_db: Database,
-    /// Database of `ExecutionResult`s indexed by transaction hash.
-    #[data_size(skip)]
-    execution_results_db: Database,
+    /// The transaction databases.
+    transaction_dbs: VersionedDatabases<TransactionHash, Transaction>,
+    /// Databases of `ExecutionResult`s indexed by transaction hash for current DB or by deploy
+    /// hash for legacy DB.
+    execution_result_dbs: VersionedDatabases<TransactionHash, ExecutionResult>,
     /// The transfer database.
     #[data_size(skip)]
     transfer_db: Database,
     /// The state storage database.
     #[data_size(skip)]
     state_store_db: Database,
-    /// The finalized deploy approvals database.
-    #[data_size(skip)]
-    finalized_deploy_approvals_db: Database,
-    /// The finalized transaction approvals database.
-    #[data_size(skip)]
-    finalized_transaction_approvals_db: Database,
+    /// The finalized transaction approvals databases.
+    finalized_transaction_approvals_dbs: VersionedDatabases<TransactionHash, FinalizedApprovals>,
     /// A map of block height to block ID.
     block_height_index: BTreeMap<u64, BlockHash>,
     /// A map of era ID to switch block ID.
     switch_block_era_id_index: BTreeMap<EraId, BlockHash>,
-    /// A map of deploy hashes to hashes and heights of blocks containing them.
-    deploy_hash_index: BTreeMap<DeployHash, BlockHashAndHeight>,
+    /// A map of transaction hashes to hashes and heights of blocks containing them.
+    transaction_hash_index: BTreeMap<TransactionHash, BlockHashAndHeight>,
     /// Runs of completed blocks known in storage.
     completed_blocks: DisjointSequences,
     /// The activation point era of the current protocol version.
@@ -246,55 +204,6 @@ pub struct Storage {
     max_ttl: MaxTtl,
 }
 
-/// A storage component event.
-#[derive(Debug, From, Serialize)]
-#[repr(u8)]
-pub(crate) enum Event {
-    /// Storage request.
-    #[from]
-    StorageRequest(Box<StorageRequest>),
-    /// Incoming net request.
-    NetRequestIncoming(Box<NetRequestIncoming>),
-    /// Mark block completed request.
-    #[from]
-    MarkBlockCompletedRequest(MarkBlockCompletedRequest),
-    /// Make block executable request.
-    #[from]
-    MakeBlockExecutableRequest(Box<MakeBlockExecutableRequest>),
-}
-
-impl Display for Event {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        match self {
-            Event::StorageRequest(req) => req.fmt(f),
-            Event::NetRequestIncoming(incoming) => incoming.fmt(f),
-            Event::MarkBlockCompletedRequest(req) => req.fmt(f),
-            Event::MakeBlockExecutableRequest(req) => req.fmt(f),
-        }
-    }
-}
-
-impl From<NetRequestIncoming> for Event {
-    #[inline]
-    fn from(incoming: NetRequestIncoming) -> Self {
-        Event::NetRequestIncoming(Box::new(incoming))
-    }
-}
-
-impl From<StorageRequest> for Event {
-    #[inline]
-    fn from(request: StorageRequest) -> Self {
-        Event::StorageRequest(Box::new(request))
-    }
-}
-
-impl From<MakeBlockExecutableRequest> for Event {
-    #[inline]
-    fn from(request: MakeBlockExecutableRequest) -> Self {
-        Event::MakeBlockExecutableRequest(Box::new(request))
-    }
-}
-
 pub(crate) enum HighestOrphanedBlockResult {
     MissingHighestSequence,
     MissingFromBlockHeightIndex(u64),
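The new `versioned_databases` module itself is not part of this diff. From the call sites below (`new`, `get`, `put`, `exists`, `for_each_value_in_current`/`for_each_value_in_legacy`), its interface is roughly the following sketch; the exact signatures, error types, and how legacy values are converted to `V` are assumptions, not code from this PR:

```rust
use std::marker::PhantomData;

use lmdb::{Database, DatabaseFlags, Environment};

/// Sketch of a pair of LMDB databases read through one interface: a legacy
/// database (old key/serialization scheme) and a current one.
struct VersionedDatabases<K, V> {
    legacy: Database,
    current: Database,
    _phantom: PhantomData<(K, V)>,
}

impl<K, V> VersionedDatabases<K, V> {
    fn new(
        env: &Environment,
        legacy_name: &str,
        current_name: &str,
    ) -> Result<Self, lmdb::Error> {
        Ok(VersionedDatabases {
            legacy: env.create_db(Some(legacy_name), DatabaseFlags::empty())?,
            current: env.create_db(Some(current_name), DatabaseFlags::empty())?,
            _phantom: PhantomData,
        })
    }

    // Presumably also provided (signatures approximate):
    //   fn get(...) -> Result<Option<V>, _>
    //       -- try `current` first, fall back to `legacy` and convert to `V`;
    //   fn put(..., overwrite: bool) -> Result<bool, _>
    //       -- always writes to `current`;
    //   fn exists(...) -> Result<bool, _>
    //       -- checks both databases;
    //   fn for_each_value_in_current / for_each_value_in_legacy
    //       -- cursor-driven iteration, used by the re-indexing pass in
    //          `Storage::new` below.
}
```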
@@ -417,45 +326,20 @@ impl Storage {
             .saturating_add(config.max_deploy_store_size)
             .saturating_add(config.max_deploy_metadata_store_size);
 
-        // Creates the environment and databases.
-        let env = Environment::new()
-            .set_flags(
-                OS_FLAGS
-                // We manage our own directory.
-                | EnvironmentFlags::NO_SUB_DIR
-                // Disable thread local storage, strongly suggested for operation with tokio.
-                | EnvironmentFlags::NO_TLS
-                // Disable read-ahead. Our data is not stored/read in sequence that would benefit from the read-ahead.
-                | EnvironmentFlags::NO_READAHEAD,
-            )
-            .set_max_readers(MAX_TRANSACTIONS)
-            .set_max_dbs(MAX_DB_COUNT)
-            .set_map_size(total_size)
-            .open(&root.join(STORAGE_DB_FILENAME))?;
-
-        let block_header_dbs = VersionedDatabases {
-            legacy: env.create_db(Some("block_header"), DatabaseFlags::empty())?,
-            current: env.create_db(Some("block_header_v2"), DatabaseFlags::empty())?,
-        };
+        // Create the environment and databases.
+        let env = new_environment(total_size, root.as_path())?;
+
+        let block_header_dbs = VersionedDatabases::new(&env, "block_header", "block_header_v2")?;
         let block_metadata_db = env.create_db(Some("block_metadata"), DatabaseFlags::empty())?;
-        let deploy_db = env.create_db(Some("deploys"), DatabaseFlags::empty())?;
-        let transaction_db = env.create_db(Some("transactions"), DatabaseFlags::empty())?;
-        let execution_results_v1_db =
-            env.create_db(Some("deploy_metadata"), DatabaseFlags::empty())?;
-        let execution_results_db =
-            env.create_db(Some("execution_results"), DatabaseFlags::empty())?;
+        let transaction_dbs = VersionedDatabases::new(&env, "deploys", "transactions")?;
+        let execution_result_dbs =
+            VersionedDatabases::new(&env, "deploy_metadata", "execution_results")?;
         let transfer_db = env.create_db(Some("transfer"), DatabaseFlags::empty())?;
         let state_store_db = env.create_db(Some("state_store"), DatabaseFlags::empty())?;
-        let finalized_deploy_approvals_db =
-            env.create_db(Some("finalized_approvals"), DatabaseFlags::empty())?;
-        let block_body_dbs = VersionedDatabases {
-            legacy: env.create_db(Some("block_body"), DatabaseFlags::empty())?,
-            current: env.create_db(Some("block_body_v2"), DatabaseFlags::empty())?,
-        };
-        let finalized_transaction_approvals_db = env.create_db(
-            Some("versioned_finalized_approvals"),
-            DatabaseFlags::empty(),
-        )?;
+        let block_body_dbs =
+            VersionedDatabases::<_, BlockBody>::new(&env, "block_body", "block_body_v2")?;
+        let finalized_transaction_approvals_dbs =
+            VersionedDatabases::new(&env, "finalized_approvals", "versioned_finalized_approvals")?;
         let approvals_hashes_db =
             env.create_db(Some("approvals_hashes"), DatabaseFlags::empty())?;
 
@@ -463,86 +347,60 @@ impl Storage {
         info!("indexing block store");
         let mut block_height_index = BTreeMap::new();
         let mut switch_block_era_id_index = BTreeMap::new();
-        let mut deploy_hash_index = BTreeMap::new();
+        let mut transaction_hash_index = BTreeMap::new();
         let mut block_txn = env.begin_rw_txn()?;
         let mut deleted_block_hashes = HashSet::new();
-        let mut deleted_block_body_hashes = HashSet::new();
-        let mut deleted_deploy_hashes = HashSet::<DeployHash>::new();
-
-        // Note: `iter_start` has an undocumented panic if called on an empty database. We rely on
-        // the iterator being at the start when created.
-        let mut cursor = block_txn.open_rw_cursor(block_header_dbs.current)?;
-        for row in cursor.iter() {
-            let (_, raw_val) = row?;
-            let mut body_txn = env.begin_ro_txn()?;
-            let block_header: BlockHeader = lmdb_ext::deserialize(raw_val)?;
-            let maybe_block_body =
-                get_body_for_block_hash(&mut body_txn, block_header.body_hash(), &block_body_dbs);
-            if let Some(invalid_era) = hard_reset_to_start_of_era {
-                // Remove blocks that are in to-be-upgraded eras, but have obsolete protocol
-                // versions - they were most likely created before the upgrade and should be
-                // reverted.
-                if block_header.era_id() >= invalid_era
-                    && block_header.protocol_version() < protocol_version
-                {
-                    let _ = deleted_block_hashes.insert(block_header.block_hash());
-
-                    if let Some(block_body) = maybe_block_body? {
-                        deleted_deploy_hashes.extend(block_body.deploy_hashes());
-                        deleted_deploy_hashes.extend(block_body.transfer_hashes());
-                    }
-
-                    let _ = deleted_block_body_hashes.insert(*block_header.body_hash());
+        // Map of all block body hashes, with their values representing whether to retain the
+        // corresponding block bodies or not.
+        let mut block_body_hashes = HashMap::new();
+        let mut deleted_transaction_hashes = HashSet::<TransactionHash>::new();
+
+        let mut init_fn = |cursor: &mut RwCursor,
+                           block_header: BlockHeader|
+         -> Result<(), FatalStorageError> {
+            let should_retain_block = match hard_reset_to_start_of_era {
+                Some(invalid_era) => {
+                    // Retain blocks from eras before the hard reset era, and blocks after this
+                    // era if they are from the current protocol version (as otherwise a node
+                    // restart would purge them again, despite them being valid).
+                    block_header.era_id() < invalid_era
+                        || block_header.protocol_version() == protocol_version
+                }
+                None => true,
+            };
 
-                    cursor.del(WriteFlags::empty())?;
-                    continue;
+            // If we don't already have the block body hash in the collection, insert it with the
+            // value `should_retain_block`.
+            //
+            // If there is an existing value, the updated value should be `false` iff the existing
+            // value and `should_retain_block` are both `false`. Otherwise the updated value should
+            // be `true`.
+            match block_body_hashes.entry(*block_header.body_hash()) {
+                Entry::Vacant(entry) => {
+                    entry.insert(should_retain_block);
+                }
+                Entry::Occupied(entry) => {
+                    let value = entry.into_mut();
+                    *value = *value || should_retain_block;
                 }
             }
 
-            Self::insert_to_block_header_indices(
-                &mut block_height_index,
-                &mut switch_block_era_id_index,
-                &block_header,
-            )?;
-
-            if let Some(block_body) = maybe_block_body? {
-                Self::insert_to_deploy_index(
-                    &mut deploy_hash_index,
-                    block_header.block_hash(),
-                    block_header.height(),
-                    block_body.deploy_and_transfer_hashes(),
-                )?;
-            }
-        }
-        drop(cursor);
-
-        let mut cursor = block_txn.open_rw_cursor(block_header_dbs.legacy)?;
-        for row in cursor.iter() {
-            let (_, raw_val) = row?;
             let mut body_txn = env.begin_ro_txn()?;
-            let block_header: BlockHeaderV1 = lmdb_ext::deserialize(raw_val)?;
-            let block_header = BlockHeader::from(block_header);
-            let maybe_block_body =
-                get_body_for_block_hash(&mut body_txn, block_header.body_hash(), &block_body_dbs);
-            if let Some(invalid_era) = hard_reset_to_start_of_era {
-                // Remove blocks that are in to-be-upgraded eras, but have obsolete protocol
-                // versions - they were most likely created before the upgrade and should be
-                // reverted.
-                if block_header.era_id() >= invalid_era
-                    && block_header.protocol_version() < protocol_version
-                {
-                    let _ = deleted_block_hashes.insert(block_header.block_hash());
-
-                    if let Some(block_body) = maybe_block_body? {
-                        Self::insert_to_deploy_index(
-                            &mut deploy_hash_index,
+            let maybe_block_body = block_body_dbs.get(&mut body_txn, block_header.body_hash())?;
+            if !should_retain_block {
+                let _ = deleted_block_hashes.insert(block_header.block_hash());
+
+                if let Some(block_body) = &maybe_block_body {
+                    deleted_transaction_hashes.extend(
+                        block_body
+                            .deploy_and_transfer_hashes()
+                            .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash)),
+                    );
+                }
+
+                cursor.del(WriteFlags::empty())?;
+                return Ok(());
+            }
+
+            Self::insert_to_block_header_indices(
+                &mut block_height_index,
+                &mut switch_block_era_id_index,
+                &block_header,
+            )?;
+
+            if let Some(block_body) = maybe_block_body {
+                let transaction_hashes = block_body
+                    .deploy_and_transfer_hashes()
+                    .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash))
+                    .collect();
+                Self::insert_to_transaction_index(
+                    &mut transaction_hash_index,
                     block_header.block_hash(),
                     block_header.height(),
-                    block_body.deploy_and_transfer_hashes(),
+                    transaction_hashes,
                 )?;
             }
-        }
-        drop(cursor);
-        info!("block store reindexing complete");
-        block_txn.commit()?;
+            Ok(())
+        };
 
-        let deleted_block_hashes_raw = deleted_block_hashes.iter().map(BlockHash::as_ref).collect();
+        block_header_dbs.for_each_value_in_current(&mut block_txn, &mut init_fn)?;
+        block_header_dbs.for_each_value_in_legacy(&mut block_txn, &mut init_fn)?;
 
-        initialize_block_body_db(
-            &env,
-            &block_header_dbs,
-            &block_body_dbs,
-            &deleted_block_body_hashes
-                .iter()
-                .map(Digest::as_ref)
-                .collect(),
-        )?;
+        info!("block store reindexing complete");
+        block_txn.commit()?;
 
-        initialize_block_metadata_db(&env, &block_metadata_db, &deleted_block_hashes_raw)?;
-        initialize_execution_results_db(
-            &env,
-            &execution_results_v1_db,
-            "execution results v1 db",
-            &deleted_deploy_hashes,
-        )?;
-        initialize_execution_results_db(
-            &env,
-            &execution_results_db,
-            "execution results db",
-            &deleted_deploy_hashes,
-        )?;
+        let deleted_block_body_hashes = block_body_hashes
+            .into_iter()
+            .filter_map(|(body_hash, retain)| (!retain).then_some(body_hash))
+            .collect();
+        initialize_block_body_dbs(&env, block_body_dbs, deleted_block_body_hashes)?;
+        initialize_block_metadata_db(&env, block_metadata_db, deleted_block_hashes)?;
+        initialize_execution_result_dbs(&env, execution_result_dbs, deleted_transaction_hashes)?;
 
         let metrics = registry.map(Metrics::new).transpose()?;
 
@@ -600,17 +448,14 @@ impl Storage {
             block_header_dbs,
             block_body_dbs,
             block_metadata_db,
             approvals_hashes_db,
-            deploy_db,
-            transaction_db,
-            execution_results_v1_db,
-            execution_results_db,
+            transaction_dbs,
+            execution_result_dbs,
             transfer_db,
             state_store_db,
-            finalized_deploy_approvals_db,
-            finalized_transaction_approvals_db,
+            finalized_transaction_approvals_dbs,
             block_height_index,
             switch_block_era_id_index,
-            deploy_hash_index,
+            transaction_hash_index,
             completed_blocks: Default::default(),
             activation_era,
             key_block_height_for_activation_point: None,
@@ -789,9 +634,7 @@ impl Storage {
             }
             NetRequest::LegacyDeploy(ref serialized_id) => {
                 let id = decode_item_id::<LegacyDeploy>(serialized_id)?;
-                let opt_item = self
-                    .get_legacy_deploy(id)
-                    .map_err(FatalStorageError::from)?;
+                let opt_item = self.get_legacy_deploy(id)?;
                 let fetch_response = FetchResponse::from_opt(id, opt_item);
 
                 Ok(self.update_pool_and_send(
@@ -900,7 +743,7 @@ impl Storage {
         // average the actual execution time will be very low.
         Ok(match req {
             StorageRequest::PutBlock { block, responder } => {
-                responder.respond(self.write_block(&block)?).ignore()
+                responder.respond(self.put_block(&block)?).ignore()
             }
             StorageRequest::PutApprovalsHashes {
                 approvals_hashes,
                 responder,
@@ -973,43 +816,24 @@ impl Storage {
             } => responder
                 .respond(self.put_transaction(&transaction)?)
                 .ignore(),
-            StorageRequest::GetDeploys {
-                deploy_hashes,
+            StorageRequest::GetTransactions {
+                transaction_hashes,
                 responder,
             } => {
                 let mut txn = self.env.begin_ro_txn()?;
                 responder
-                    .respond(
-                        self.get_deploys_with_finalized_approvals(
-                            &mut txn,
-                            deploy_hashes.as_slice(),
-                        )?,
-                    )
+                    .respond(self.get_transactions_with_finalized_approvals(
+                        &mut txn,
+                        transaction_hashes.as_slice(),
+                    )?)
                     .ignore()
             }
             StorageRequest::GetLegacyDeploy {
                 deploy_hash,
                 responder,
             } => {
-                let transaction_hash = TransactionHash::from(deploy_hash);
-                let mut txn = self.env.begin_ro_txn()?;
-                let transaction = match self
-                    .get_transaction_with_finalized_approvals(&mut txn, &transaction_hash)?
-                {
-                    Some(transaction_with_finalized_approvals) => {
-                        transaction_with_finalized_approvals.into_naive()
-                    }
-                    None => return Ok(responder.respond(None).ignore()),
-                };
-                let legacy_deploy = match transaction {
-                    Transaction::Deploy(deploy) => LegacyDeploy::from(deploy),
-                    transaction @ Transaction::V1(_) => {
-                        let mismatch = VariantMismatch(Box::new((transaction_hash, transaction)));
-                        error!(%mismatch, "failed getting legacy deploy");
-                        return Err(FatalStorageError::from(mismatch));
-                    }
-                };
-                responder.respond(Some(legacy_deploy)).ignore()
+                let maybe_legacy_deploy = self.get_legacy_deploy(deploy_hash)?;
+                responder.respond(maybe_legacy_deploy).ignore()
             }
             StorageRequest::GetTransaction {
                 transaction_id,
@@ -1033,18 +857,9 @@ impl Storage {
                 responder,
             } => {
                 let mut txn = self.env.begin_ro_txn()?;
-                if txn.value_exists_bytesrepr(
-                    self.transaction_db,
-                    &transaction_id.transaction_hash(),
-                )? {
-                    return Ok(responder.respond(true).ignore());
-                }
-                let has_transaction = match transaction_id {
-                    TransactionId::Deploy { deploy_hash, .. } => {
-                        txn.value_exists(&[self.deploy_db], &deploy_hash)?
-                    }
-                    TransactionId::V1 { .. } => false,
-                };
+                let has_transaction = self
+                    .transaction_dbs
+                    .exists(&mut txn, &transaction_id.transaction_hash())?;
                 responder.respond(has_transaction).ignore()
             }
             StorageRequest::GetExecutionResults {
@@ -1073,43 +888,34 @@ impl Storage {
                 txn.commit()?;
                 responder.respond(()).ignore()
             }
-            StorageRequest::GetDeployAndExecutionInfo {
-                deploy_hash,
+            StorageRequest::GetTransactionAndExecutionInfo {
+                transaction_hash,
                 responder,
             } => {
-                let transaction_hash = TransactionHash::from(deploy_hash);
                 let mut txn = self.env.begin_ro_txn()?;
-                let deploy_wfa = match self
+                let transaction_wfa = match self
                     .get_transaction_with_finalized_approvals(&mut txn, &transaction_hash)?
                 {
-                    Some(TransactionWithFinalizedApprovals::Deploy {
-                        deploy,
-                        finalized_approvals,
-                    }) => DeployWithFinalizedApprovals::new(deploy, finalized_approvals),
-                    Some(transaction_wfa) => {
-                        let mismatch =
-                            VariantMismatch(Box::new((transaction_hash, transaction_wfa)));
-                        error!(%mismatch, "failed getting getting deploy and exec info");
-                        return Err(FatalStorageError::from(mismatch));
-                    }
+                    Some(transaction_wfa) => transaction_wfa,
                     None => return Ok(responder.respond(None).ignore()),
                 };
-                let block_hash_and_height =
-                    match self.get_block_hash_and_height_by_deploy_hash(deploy_hash)?
-                {
-                    Some(value) => value,
-                    None => return Ok(responder.respond(Some((deploy_wfa, None))).ignore()),
-                };
-                let execution_result = self.get_execution_result(&mut txn, &deploy_hash)?;
-                let execution_info = DeployExecutionInfo {
+                let block_hash_and_height = match self.transaction_hash_index.get(&transaction_hash)
+                {
+                    Some(value) => *value,
+                    None => return Ok(responder.respond(Some((transaction_wfa, None))).ignore()),
+                };
+                let execution_result =
+                    self.execution_result_dbs.get(&mut txn, &transaction_hash)?;
+                let execution_info = ExecutionInfo {
                     block_hash: *block_hash_and_height.block_hash(),
                     block_height: block_hash_and_height.block_height(),
                     execution_result,
                 };
                 responder
-                    .respond(Some((deploy_wfa, Some(execution_info))))
+                    .respond(Some((transaction_wfa, Some(execution_info))))
                     .ignore()
             }
             StorageRequest::GetSignedBlockByHash {
@@ -1344,14 +1150,8 @@ impl Storage {
                 block_header,
                 responder,
             } => {
-                let block_header_hash = block_header.block_hash();
-                match self.put_block_headers(vec![*block_header]) {
-                    Ok(result) => responder.respond(result).ignore(),
-                    Err(err) => {
-                        error!(?err, ?block_header_hash, "error when storing block header");
-                        return Err(err);
-                    }
-                }
+                let result = self.put_block_header(*block_header)?;
+                responder.respond(result).ignore()
             }
             StorageRequest::GetAvailableBlockRange { responder } => {
                 responder.respond(self.get_available_block_range()).ignore()
@@ -1397,6 +1197,90 @@ impl Storage {
         })
     }
 
+    /// Writes a block to storage, updating indices as necessary.
+    ///
+    /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it
+    /// couldn't be written because it already existed, and `Err(_)` if there was an error.
+    fn put_block(&mut self, block: &Block) -> Result<bool, FatalStorageError> {
+        let env = Rc::clone(&self.env);
+        let mut txn = env.begin_rw_txn()?;
+        let wrote = self.write_block(&mut txn, block)?;
+        if wrote {
+            txn.commit()?;
+        }
+        Ok(wrote)
+    }
+
+    /// Writes a block to storage, updating indices as necessary.
+    ///
+    /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it
+    /// couldn't be written because it already existed, and `Err(_)` if there was an error.
+    fn write_block(
+        &mut self,
+        txn: &mut RwTransaction,
+        block: &Block,
+    ) -> Result<bool, FatalStorageError> {
+        if !self
+            .block_body_dbs
+            .put(txn, block.body_hash(), &block.clone_body(), true)?
+        {
+            error!(%block, "could not insert block body");
+            return Ok(false);
+        }
+
+        let block_header = block.clone_header();
+        if !self
+            .block_header_dbs
+            .put(txn, block.hash(), &block_header, true)?
+ { + error!(%block, "could not insert block header"); + return Ok(false); + } + + Self::insert_to_block_header_indices( + &mut self.block_height_index, + &mut self.switch_block_era_id_index, + &block_header, + )?; + let transaction_hashes = block + .deploy_and_transfer_hashes() + .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash)) + .collect(); + Self::insert_to_transaction_index( + &mut self.transaction_hash_index, + *block.hash(), + block.height(), + transaction_hashes, + )?; + + Ok(true) + } + + pub(crate) fn put_executed_block( + &mut self, + block: &Block, + approvals_hashes: &ApprovalsHashes, + execution_results: HashMap, + ) -> Result { + let env = Rc::clone(&self.env); + let mut txn = env.begin_rw_txn()?; + let wrote = self.write_block(&mut txn, block)?; + if !wrote { + return Err(FatalStorageError::FailedToOverwriteBlock); + } + + let _ = self.write_approvals_hashes(&mut txn, approvals_hashes)?; + let _ = self.write_execution_results( + &mut txn, + block.hash(), + block.height(), + execution_results, + )?; + txn.commit()?; + + Ok(true) + } + /// Handles a [`BlockCompletedAnnouncement`]. fn handle_mark_block_completed_request( &mut self, @@ -1441,10 +1325,11 @@ impl Storage { /// Put a single transaction into storage. pub fn put_transaction(&self, transaction: &Transaction) -> Result { - let mut txn = self.env.begin_rw_txn()?; let transaction_hash = transaction.hash(); - let outcome = - txn.put_value_bytesrepr(self.transaction_db, &transaction_hash, transaction, false)?; + let mut txn = self.env.begin_rw_txn()?; + let outcome = self + .transaction_dbs + .put(&mut txn, &transaction_hash, transaction, false)?; if outcome { debug!(%transaction_hash, "Storage: new transaction stored"); } else { @@ -1468,10 +1353,9 @@ impl Storage { return Ok(false); } }; - Ok(txn.value_exists( - &[self.block_body_dbs.legacy, self.block_body_dbs.current], - block_header.body_hash(), - )?) + self.block_body_dbs + .exists(&mut txn, block_header.body_hash()) + .map_err(Into::into) } /// Retrieves approvals hashes by block hash. @@ -1612,11 +1496,15 @@ impl Storage { block_height: u64, execution_results: HashMap, ) -> Result { - Self::insert_to_deploy_index( - &mut self.deploy_hash_index, + let transaction_hashes = execution_results + .keys() + .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash)) + .collect(); + Self::insert_to_transaction_index( + &mut self.transaction_hash_index, *block_hash, block_height, - execution_results.keys(), + transaction_hashes, )?; let mut transfers: Vec = vec![]; for (deploy_hash, execution_result) in execution_results.into_iter() { @@ -1645,29 +1533,19 @@ impl Storage { } } - // Write the execution result to the appropriate DB. - let was_written = match execution_result { - ExecutionResult::V1(v1_result) => { - let v1_results = DeployMetadataV1 { - execution_results: iter::once((*block_hash, v1_result)).collect(), - }; - txn.put_value( - self.execution_results_v1_db, - &deploy_hash, - &v1_results, - true, - )? 
- } - versioned_result => txn.put_value( - self.execution_results_db, - &deploy_hash, - &versioned_result, - true, - )?, - }; + let was_written = self.execution_result_dbs.put( + txn, + &TransactionHash::Deploy(deploy_hash), + &execution_result, + true, + )?; if !was_written { - error!(?block_hash, ?deploy_hash, "failed to write deploy metadata"); + error!( + ?block_hash, + ?deploy_hash, + "failed to write execution results" + ); debug_assert!(was_written); } } @@ -1875,11 +1753,19 @@ impl Storage { return Ok(None); }; - let deploy_hashes = block.deploy_and_transfer_hashes().copied().collect_vec(); + let transaction_hashes = block + .deploy_and_transfer_hashes() + .map(|deploy_hash| TransactionHash::from(*deploy_hash)) + .collect_vec(); Ok(self - .get_deploys_with_finalized_approvals(&mut txn, &deploy_hashes)? + .get_transactions_with_finalized_approvals(&mut txn, &transaction_hashes)? .into_iter() - .map(|maybe_deploy| maybe_deploy.map(|deploy| deploy.into_naive())) + .map(|maybe_transaction| { + maybe_transaction.and_then(|twfa| match twfa.into_naive() { + Transaction::Deploy(deploy) => Some(deploy), + Transaction::V1(_) => None, + }) + }) .collect::>>() .map(|deploys| (block, deploys))) } @@ -1936,8 +1822,8 @@ impl Storage { txn: &mut Tx, deploy_hash: DeployHash, ) -> Result, FatalStorageError> { - self.deploy_hash_index - .get(&deploy_hash) + self.transaction_hash_index + .get(&TransactionHash::Deploy(deploy_hash)) .and_then(|block_hash_and_height| { self.get_single_block_header(txn, block_hash_and_height.block_hash()) .transpose() @@ -1945,15 +1831,6 @@ impl Storage { .transpose() } - /// Retrieves the block hash and height for a deploy hash by looking it up in the index - /// and returning it. - fn get_block_hash_and_height_by_deploy_hash( - &self, - deploy_hash: DeployHash, - ) -> Result, FatalStorageError> { - Ok(self.deploy_hash_index.get(&deploy_hash).copied()) - } - /// Retrieves the highest block from storage, if one exists. May return an LMDB error. fn get_highest_block( &self, @@ -2176,19 +2053,11 @@ impl Storage { txn: &mut Tx, block_hash: &BlockHash, ) -> Result, FatalStorageError> { - // Let's try to get a regular block header first: - let (block_header, _is_legacy): (BlockHeader, _) = - match txn.get_value(self.block_header_dbs.current, &block_hash)? { - Some(header) => (header, false), - // If there isn't any, let's look at the legacy database: - None => match txn.get_value(self.block_header_dbs.legacy, &block_hash)? { - Some(legacy_header) => (BlockHeaderV1::into(legacy_header), true), - None => return Ok(None), - }, - }; - + let block_header = match self.block_header_dbs.get(txn, block_hash)? { + Some(block_header) => block_header, + None => return Ok(None), + }; block_header.set_block_hash(*block_hash); - Ok(Some(block_header)) } @@ -2212,52 +2081,21 @@ impl Storage { Ok(Some(SignedBlockHeader::new(block_header, block_signatures))) } - /// Stores block headers in the db and, if successful, updates the in-memory indices. - /// Returns an error on failure or a boolean indicating whether any of the block headers were - /// previously known. 
- fn put_block_headers( - &mut self, - block_headers: Vec, - ) -> Result { + fn put_block_header(&mut self, block_header: BlockHeader) -> Result { let mut txn = self.env.begin_rw_txn()?; - let mut result = false; - - for block_header in &block_headers { - let block_header_hash = block_header.block_hash(); - let put_result = match block_header { - BlockHeader::V1(block_header) => txn.put_value( - self.block_header_dbs.legacy, - &block_header_hash, - block_header, - false, - ), - BlockHeader::V2(_) => txn.put_value( - self.block_header_dbs.current, - &block_header_hash, - block_header, - false, - ), - }; - match put_result { - Ok(single_result) => { - result = result && single_result; - } - Err(err) => { - error!(?err, ?block_header_hash, "error when storing block header"); - txn.abort(); - return Err(err.into()); - } - } - } + let result = self.block_header_dbs.put( + &mut txn, + &block_header.block_hash(), + &block_header, + false, + )?; txn.commit()?; - // Update the indices if and only if we wrote to storage correctly. - for block_header in &block_headers { - Self::insert_to_block_header_indices( - &mut self.block_height_index, - &mut self.switch_block_era_id_index, - block_header, - )?; - } + + Self::insert_to_block_header_indices( + &mut self.block_height_index, + &mut self.switch_block_era_id_index, + &block_header, + )?; Ok(result) } @@ -2289,8 +2127,7 @@ impl Storage { } }; - let maybe_block_body = - get_body_for_block_hash(txn, block_header.body_hash(), &self.block_body_dbs); + let maybe_block_body = self.block_body_dbs.get(txn, block_header.body_hash()); let block_body = match maybe_block_body? { Some(block_body) => block_body, None => { @@ -2306,99 +2143,33 @@ impl Storage { Ok(Some(block)) } - /// Retrieves a set of deploys from storage, along with their potential finalized approvals. - fn get_deploys_with_finalized_approvals( + /// Retrieves a set of transactions, along with their potential finalized approvals. + fn get_transactions_with_finalized_approvals( &self, txn: &mut Tx, - deploy_hashes: &[DeployHash], - ) -> Result; 1]>, FatalStorageError> { - deploy_hashes + transaction_hashes: &[TransactionHash], + ) -> Result; 1]>, FatalStorageError> { + transaction_hashes .iter() - .map(|deploy_hash| { - let transaction_hash = TransactionHash::from(*deploy_hash); - match self.get_transaction_with_finalized_approvals(txn, &transaction_hash)? { - Some(TransactionWithFinalizedApprovals::Deploy { - deploy, - finalized_approvals, - }) => Ok(Some(DeployWithFinalizedApprovals::new( - deploy, - finalized_approvals, - ))), - Some(transaction_wfa) => { - let mismatch = - VariantMismatch(Box::new((transaction_hash, transaction_wfa))); - error!( - %mismatch, - "failed getting getting deploys with finalized approvals" - ); - Err(FatalStorageError::from(mismatch)) - } - None => Ok(None), - } + .map(|transaction_hash| { + self.get_transaction_with_finalized_approvals(txn, transaction_hash) }) .collect() } - /// Retrieves a single transaction, trying the `transaction_db` first and falling back to - /// `deploy_db` if appropriate. - fn get_transaction( - &self, - txn: &mut Tx, - transaction_hash: &TransactionHash, - ) -> Result, LmdbExtError> { - if let Some(transaction) = txn.get_value_bytesrepr(self.transaction_db, transaction_hash)? { - return Ok(Some(transaction)); - } - - // If this is for a deploy, try the old deploy_db. 
- let deploy_hash = match transaction_hash { - TransactionHash::Deploy(deploy_hash) => deploy_hash, - TransactionHash::V1(_) => return Ok(None), - }; - - Ok(txn - .get_value::<_, Deploy>(self.deploy_db, deploy_hash)? - .map(Transaction::from)) - } - - /// Retrieves the finalized approvals for a transaction, trying the `finalized_approvals_db` - /// first and falling back to `finalized_deploy_approvals_db` if appropriate. - fn get_finalized_approvals( - &self, - txn: &mut Tx, - transaction_hash: &TransactionHash, - ) -> Result, LmdbExtError> { - if let Some(approvals) = - txn.get_value_bytesrepr(self.finalized_transaction_approvals_db, transaction_hash)? - { - return Ok(Some(approvals)); - } - - // If this is for a deploy, try the old finalized_deploy_approvals_db. - let deploy_hash = match transaction_hash { - TransactionHash::Deploy(deploy_hash) => deploy_hash, - TransactionHash::V1(_) => return Ok(None), - }; - - Ok(txn - .get_value::<_, FinalizedDeployApprovals>( - self.finalized_deploy_approvals_db, - deploy_hash, - )? - .map(FinalizedApprovals::from)) - } - /// Retrieves a single transaction along with its finalized approvals. fn get_transaction_with_finalized_approvals( &self, txn: &mut Tx, transaction_hash: &TransactionHash, ) -> Result, FatalStorageError> { - let transaction = match self.get_transaction(txn, transaction_hash)? { + let transaction = match self.transaction_dbs.get(txn, transaction_hash)? { Some(transaction) => transaction, None => return Ok(None), }; - let finalized_approvals = self.get_finalized_approvals(txn, transaction_hash)?; + let finalized_approvals = self + .finalized_transaction_approvals_dbs + .get(txn, transaction_hash)?; let ret = match (transaction, finalized_approvals) { ( @@ -2424,38 +2195,6 @@ impl Storage { Ok(Some(ret)) } - /// Retrieves the execution result associated with the given deploy. - fn get_execution_result( - &self, - txn: &mut Tx, - deploy_hash: &DeployHash, - ) -> Result, FatalStorageError> { - if let Some(exec_result) = txn.get_value(self.execution_results_db, deploy_hash)? { - return Ok(Some(exec_result)); - }; - - // If we don't have this execution result stored as a versioned ExecutionResult, try to read - // from the `execution_results_v1_db`, where they were stored as a newtyped HashMap with a - // single entry prior to `casper-node` v2.0.0. - let v1_results: DeployMetadataV1 = - match txn.get_value(self.execution_results_v1_db, deploy_hash)? { - Some(results) => results, - None => return Ok(None), - }; - - if v1_results.execution_results.len() != 1 { - return Err(FatalStorageError::InvalidExecutionResultsV1Length { - deploy_hash: *deploy_hash, - results_length: v1_results.execution_results.len(), - }); - } - - // Safe to unwrap due to length check immediately above. - let v1_result = v1_results.execution_results.into_iter().next().unwrap().1; - - Ok(Some(ExecutionResult::V1(v1_result))) - } - /// Retrieves transfers associated with block. /// /// If no transfers are stored for the block, an empty transfers instance will be @@ -2508,15 +2247,19 @@ impl Storage { finalized_approvals: &FinalizedApprovals, ) -> Result { let mut txn = self.env.begin_rw_txn()?; - let original_transaction = self.get_transaction(&mut txn, transaction_hash)?.ok_or({ - FatalStorageError::UnexpectedFinalizedApprovals { - transaction_hash: *transaction_hash, - } - })?; + let original_transaction = self + .transaction_dbs + .get(&mut txn, transaction_hash)? 
+ .ok_or({ + FatalStorageError::UnexpectedFinalizedApprovals { + transaction_hash: *transaction_hash, + } + })?; // Only store the finalized approvals if they are different from the original ones. - let maybe_existing_finalized_approvals = - self.get_finalized_approvals(&mut txn, transaction_hash)?; + let maybe_existing_finalized_approvals = self + .finalized_transaction_approvals_dbs + .get(&mut txn, transaction_hash)?; if maybe_existing_finalized_approvals.as_ref() == Some(finalized_approvals) { return Ok(false); } @@ -2537,8 +2280,8 @@ impl Storage { }; if should_store { - let _ = txn.put_value_bytesrepr( - self.finalized_transaction_approvals_db, + let _ = self.finalized_transaction_approvals_dbs.put( + &mut txn, transaction_hash, finalized_approvals, true, @@ -2552,11 +2295,25 @@ impl Storage { fn get_legacy_deploy( &self, deploy_hash: DeployHash, - ) -> Result, LmdbExtError> { - self.env - .begin_ro_txn() - .map_err(Into::into) - .and_then(|mut txn| txn.get_value(self.deploy_db, &deploy_hash)) + ) -> Result, FatalStorageError> { + let transaction_hash = TransactionHash::from(deploy_hash); + let mut txn = self.env.begin_ro_txn()?; + let transaction = + match self.get_transaction_with_finalized_approvals(&mut txn, &transaction_hash)? { + Some(transaction_with_finalized_approvals) => { + transaction_with_finalized_approvals.into_naive() + } + None => return Ok(None), + }; + + match transaction { + Transaction::Deploy(deploy) => Ok(Some(LegacyDeploy::from(deploy))), + transaction @ Transaction::V1(_) => { + let mismatch = VariantMismatch(Box::new((transaction_hash, transaction))); + error!(%mismatch, "failed getting legacy deploy"); + Err(FatalStorageError::from(mismatch)) + } + } } /// Retrieves a transaction by transaction ID. @@ -2567,7 +2324,7 @@ impl Storage { let transaction_hash = transaction_id.transaction_hash(); let mut txn = self.env.begin_ro_txn()?; - let transaction = match self.get_transaction(&mut txn, &transaction_hash)? { + let transaction = match self.transaction_dbs.get(&mut txn, &transaction_hash)? { None => return Ok(None), Some(transaction) if transaction.fetch_id() == transaction_id => { return Ok(Some(transaction)); @@ -2575,7 +2332,10 @@ impl Storage { Some(transaction) => transaction, }; - let finalized_approvals = match self.get_finalized_approvals(&mut txn, &transaction_hash)? { + let finalized_approvals = match self + .finalized_transaction_approvals_dbs + .get(&mut txn, &transaction_hash)? + { None => return Ok(None), Some(approvals) => approvals, }; @@ -2790,8 +2550,7 @@ impl Storage { Some(block_header) => block_header, None => return Ok(None), }; - let maybe_block_body = - get_body_for_block_hash(txn, block_header.body_hash(), &self.block_body_dbs); + let maybe_block_body = self.block_body_dbs.get(txn, block_header.body_hash()); let Some(block_body) = maybe_block_body? else { debug!( @@ -2803,12 +2562,13 @@ impl Storage { let mut execution_results = vec![]; for deploy_hash in block_body.deploy_and_transfer_hashes() { - match self.get_execution_result(txn, deploy_hash)? { + let transaction_hash = TransactionHash::Deploy(*deploy_hash); + match self.execution_result_dbs.get(txn, &transaction_hash)? 
{ None => { debug!( %block_hash, - %deploy_hash, - "retrieved block but deploy is absent" + %transaction_hash, + "retrieved block but execution result for given transaction is absent" ); return Ok(None); } @@ -2833,16 +2593,22 @@ impl Storage { let mut ret = Vec::with_capacity(execution_results.len()); for (deploy_hash, execution_result) in execution_results { - match txn.get_value::<_, Deploy>(self.deploy_db, &deploy_hash)? { + let transaction_hash = TransactionHash::from(deploy_hash); + match self.transaction_dbs.get(&mut txn, &transaction_hash)? { None => { warn!( %block_hash, - %deploy_hash, - "missing deploy" + %transaction_hash, + "missing transaction" ); return Ok(None); } - Some(deploy) => ret.push((deploy_hash, deploy.take_header(), execution_result)), + Some(Transaction::Deploy(deploy)) => { + ret.push((deploy_hash, deploy.take_header(), execution_result)) + } + // Note: the `unreachable!` below will be removed when execution results are updated + // to handle Transactions. + Some(Transaction::V1(_)) => unreachable!(), }; } Ok(Some(ret)) @@ -2947,67 +2713,6 @@ fn move_storage_files_to_network_subdir( Ok(()) } -/// On-disk storage configuration. -#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Config { - /// The path to the folder where any files created or read by the storage component will exist. - /// - /// If the folder doesn't exist, it and any required parents will be created. - pub path: PathBuf, - /// The maximum size of the database to use for the block store. - /// - /// The size should be a multiple of the OS page size. - pub max_block_store_size: usize, - /// The maximum size of the database to use for the deploy store. - /// - /// The size should be a multiple of the OS page size. - pub max_deploy_store_size: usize, - /// The maximum size of the database to use for the deploy metadata store. - /// - /// The size should be a multiple of the OS page size. - pub max_deploy_metadata_store_size: usize, - /// The maximum size of the database to use for the component state store. - /// - /// The size should be a multiple of the OS page size. - pub max_state_store_size: usize, - /// Whether or not memory deduplication is enabled. - pub enable_mem_deduplication: bool, - /// How many loads before memory duplication checks for dead references. - pub mem_pool_prune_interval: u16, -} - -impl Default for Config { - fn default() -> Self { - Config { - // No one should be instantiating a config with storage set to default. - path: "/dev/null".into(), - max_block_store_size: DEFAULT_MAX_BLOCK_STORE_SIZE, - max_deploy_store_size: DEFAULT_MAX_DEPLOY_STORE_SIZE, - max_deploy_metadata_store_size: DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE, - max_state_store_size: DEFAULT_MAX_STATE_STORE_SIZE, - enable_mem_deduplication: true, - mem_pool_prune_interval: 4096, - } - } -} - -impl Config { - /// Returns a default `Config` suitable for tests, along with a `TempDir` which must be kept - /// alive for the duration of the test since its destructor removes the dir from the filesystem. - #[cfg(test)] - pub(crate) fn default_for_tests() -> (Self, TempDir) { - let tempdir = tempfile::tempdir().expect("should get tempdir"); - let path = tempdir.path().join("lmdb"); - - let config = Config { - path, - ..Default::default() - }; - (config, tempdir) - } -} - // Testing code. The functions below allow direct inspection of the storage component and should // only ever be used when writing tests. 
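Reviewer note (illustrative sketch, not part of the patch): with `get_execution_result` removed, the v1 fallback is now carried entirely by `VersionedDatabases::get` together with the `From<DeployMetadataV1> for ExecutionResult` conversion added in `deploy_metadata_v1.rs` further down. Assuming a `storage: &Storage` exposing the `execution_result_dbs` field this diff introduces, a read of a possibly-legacy execution result reduces to roughly:

fn read_execution_result_sketch(
    storage: &Storage,
    deploy_hash: DeployHash,
) -> Result<Option<ExecutionResult>, FatalStorageError> {
    // Sketch only: `env` and `execution_result_dbs` are the Storage fields
    // used by the read path above; error plumbing is left to `?`/`map_err`.
    let mut txn = storage.env.begin_ro_txn()?;
    // `get` probes the bytesrepr-encoded `current` DB first. For a
    // `TransactionHash::Deploy` key it then falls back to the legacy DB,
    // where a `DeployMetadataV1` entry is deserialized and converted into
    // `ExecutionResult::V1`. A `TransactionHash::V1` key has no legacy form.
    storage
        .execution_result_dbs
        .get(&mut txn, &TransactionHash::Deploy(deploy_hash))
        .map_err(Into::into)
}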
#[cfg(test)]
@@ -3016,14 +2721,14 @@ impl Storage { pub fn write_legacy_deploy(&self, deploy: &Deploy) -> bool { let mut txn = self.env.begin_rw_txn().unwrap(); let deploy_hash = deploy.hash(); - let outcome = txn - .put_value(self.deploy_db, &deploy_hash, deploy, false) - .unwrap(); + let outcome = self + .transaction_dbs + .put_legacy(&mut txn, deploy_hash, deploy, false); txn.commit().unwrap(); outcome } - /// Directly returns a deploy from internal store. + /// Directly returns a transaction from internal store. /// /// # Panics ///
@@ -3036,7 +2741,8 @@ impl Storage { .env .begin_ro_txn() .expect("could not create RO transaction"); - txn.get_value_bytesrepr(self.transaction_db, &transaction_hash) + self.transaction_dbs + .get(&mut txn, &transaction_hash) .expect("could not retrieve value from storage") }
@@ -3053,7 +2759,8 @@ impl Storage { .env .begin_ro_txn() .expect("could not create RO transaction"); - self.get_execution_result(&mut txn, deploy_hash) + self.execution_result_dbs + .get(&mut txn, &TransactionHash::Deploy(*deploy_hash)) .expect("could not retrieve deploy metadata from storage") }
@@ -3084,18 +2791,7 @@ impl Storage { .env .begin_ro_txn() .expect("could not create RO transaction"); - - let mut cursor = txn - .open_ro_cursor(self.transaction_db) - .expect("could not create cursor"); - - cursor - .iter() - .map(Result::unwrap) - .map(|(raw_key, _)| { - bytesrepr::deserialize(raw_key.to_vec()).expect("malformed transaction hash in DB") - }) - .collect() + self.transaction_dbs.keys(&txn) } /// Directly returns a deploy from internal store.
@@ -3119,129 +2815,71 @@ impl Storage { } } -/// Purges stale entries from the block body database. -fn initialize_block_body_db( +fn new_environment(total_size: usize, root: &Path) -> Result { + Environment::new() + .set_flags( + OS_FLAGS + // We manage our own directory. + | EnvironmentFlags::NO_SUB_DIR + // Disable thread-local storage; strongly suggested for operation with tokio. + | EnvironmentFlags::NO_TLS + // Disable read-ahead. Our data is not stored or read sequentially, so it would not benefit from read-ahead. + | EnvironmentFlags::NO_READAHEAD, + ) + .set_max_readers(MAX_TRANSACTIONS) + .set_max_dbs(MAX_DB_COUNT) + .set_map_size(total_size) + .open(&root.join(STORAGE_DB_FILENAME)) + .map_err(Into::into) +} + +/// Purges stale entries from the block body databases.
+fn initialize_block_body_dbs( env: &Environment, - block_header_dbs: &VersionedDatabases, - block_body_dbs: &VersionedDatabases, - deleted_block_body_hashes_raw: &HashSet<&[u8]>, + block_body_dbs: VersionedDatabases, + deleted_block_body_hashes: HashSet, ) -> Result<(), FatalStorageError> { - info!("initializing block body database"); - - fn construct_block_body_to_block_header_reverse_lookup( - txn: &impl LmdbTransaction, - block_header_dbs: &VersionedDatabases, - ) -> Result, LmdbExtError> { - let mut block_body_hash_to_header_map: BTreeMap = BTreeMap::new(); - - for row in txn.open_ro_cursor(block_header_dbs.current)?.iter() { - let (_raw_key, raw_val) = row?; - let block_header: BlockHeader = lmdb_ext::deserialize(raw_val)?; - block_body_hash_to_header_map.insert(block_header.body_hash().to_owned(), block_header); - } - for row in txn.open_ro_cursor(block_header_dbs.legacy)?.iter() { - let (_raw_key, raw_val) = row?; - let block_header: BlockHeaderV1 = lmdb_ext::deserialize(raw_val)?; - block_body_hash_to_header_map - .insert(block_header.body_hash().to_owned(), block_header.into()); - } - - Ok(block_body_hash_to_header_map) - } - + info!("initializing block body databases"); let mut txn = env.begin_rw_txn()?; - - let block_body_hash_to_header_map = - construct_block_body_to_block_header_reverse_lookup(&txn, block_header_dbs)?; - - for db in &[block_body_dbs.current, block_body_dbs.legacy] { - let mut cursor = txn.open_rw_cursor(*db)?; - - for row in cursor.iter() { - let (raw_key, _raw_val) = row?; - let block_body_hash = Digest::try_from(raw_key) - .map_err(|err| LmdbExtError::DataCorrupted(Box::new(err)))?; - if !block_body_hash_to_header_map.contains_key(&block_body_hash) { - if !deleted_block_body_hashes_raw.contains(raw_key) { - // This means that the block body isn't referenced by any header, but no header - // referencing it was just deleted, either - warn!(?raw_key, "orphaned block body detected"); - } - info!(?raw_key, "deleting block body"); - cursor.del(WriteFlags::empty())?; - } - } - - drop(cursor); + for body_hash in deleted_block_body_hashes { + block_body_dbs.delete(&mut txn, &body_hash)?; } - txn.commit()?; info!("block body database initialized"); Ok(()) } -/// Retrieves the block body for the given block hash. -fn get_body_for_block_hash( - txn: &mut Tx, - block_body_hash: &Digest, - block_body_dbs: &VersionedDatabases, -) -> Result, LmdbExtError> { - let maybe_block_body: Option = - txn.get_value(block_body_dbs.current, block_body_hash)?; - Ok(if maybe_block_body.is_none() { - let maybe_legacy_block_body: Option = - txn.get_value(block_body_dbs.legacy, block_body_hash)?; - maybe_legacy_block_body.map(|block_body_v1| block_body_v1.into()) - } else { - maybe_block_body - }) -} - /// Purges stale entries from the block metadata database. 
fn initialize_block_metadata_db( env: &Environment, - block_metadata_db: &Database, - deleted_block_hashes: &HashSet<&[u8]>, + block_metadata_db: Database, + deleted_block_hashes: HashSet, ) -> Result<(), FatalStorageError> { info!("initializing block metadata database"); let mut txn = env.begin_rw_txn()?; - let mut cursor = txn.open_rw_cursor(*block_metadata_db)?; - - for row in cursor.iter() { - let (raw_key, _) = row?; - if deleted_block_hashes.contains(raw_key) { - cursor.del(WriteFlags::empty())?; - continue; - } + for block_hash in deleted_block_hashes { + txn.del(block_metadata_db, &block_hash, None)?; } - - drop(cursor); txn.commit()?; - info!("block metadata database initialized"); Ok(()) } -/// Purges stale entries from the deploy metadata database. -fn initialize_execution_results_db( +/// Purges stale entries from the execution result databases. +fn initialize_execution_result_dbs( env: &Environment, - db: &Database, - db_name: &str, - deleted_deploy_hashes: &HashSet, + execution_result_dbs: VersionedDatabases, + deleted_transaction_hashes: HashSet, ) -> Result<(), LmdbExtError> { - info!("initializing {}", db_name); - + info!("initializing execution result databases"); let mut txn = env.begin_rw_txn()?; - deleted_deploy_hashes - .iter() - .for_each(|deleted_deploy_hash| { - if txn.del(*db, deleted_deploy_hash, None).is_err() { - debug!(%deleted_deploy_hash, db_name, "not purging entry: doesn't exist"); - } - }); + for hash in deleted_transaction_hashes { + if execution_result_dbs.delete(&mut txn, &hash).is_err() { + debug!(%hash, "not purging entry: doesn't exist"); + } + } txn.commit()?; - - info!("deploy metadata database initialized"); + info!("execution result databases initialized"); Ok(()) } diff --git a/node/src/components/storage/config.rs b/node/src/components/storage/config.rs new file mode 100644 index 0000000000..91af7eaf24 --- /dev/null +++ b/node/src/components/storage/config.rs @@ -0,0 +1,73 @@ +use std::path::PathBuf; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +#[cfg(test)] +use tempfile::TempDir; + +const GIB: usize = 1024 * 1024 * 1024; +const DEFAULT_MAX_BLOCK_STORE_SIZE: usize = 450 * GIB; +const DEFAULT_MAX_DEPLOY_STORE_SIZE: usize = 300 * GIB; +const DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE: usize = 300 * GIB; +const DEFAULT_MAX_STATE_STORE_SIZE: usize = 10 * GIB; + +/// On-disk storage configuration. +#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct Config { + /// The path to the folder where any files created or read by the storage component will exist. + /// + /// If the folder doesn't exist, it and any required parents will be created. + pub path: PathBuf, + /// The maximum size of the database to use for the block store. + /// + /// The size should be a multiple of the OS page size. + pub max_block_store_size: usize, + /// The maximum size of the database to use for the deploy store. + /// + /// The size should be a multiple of the OS page size. + pub max_deploy_store_size: usize, + /// The maximum size of the database to use for the deploy metadata store. + /// + /// The size should be a multiple of the OS page size. + pub max_deploy_metadata_store_size: usize, + /// The maximum size of the database to use for the component state store. + /// + /// The size should be a multiple of the OS page size. + pub max_state_store_size: usize, + /// Whether or not memory deduplication is enabled. 
+ pub enable_mem_deduplication: bool, + /// How many loads before memory deduplication checks for dead references. + pub mem_pool_prune_interval: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + // No one should be instantiating a config with storage set to default. + path: "/dev/null".into(), + max_block_store_size: DEFAULT_MAX_BLOCK_STORE_SIZE, + max_deploy_store_size: DEFAULT_MAX_DEPLOY_STORE_SIZE, + max_deploy_metadata_store_size: DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE, + max_state_store_size: DEFAULT_MAX_STATE_STORE_SIZE, + enable_mem_deduplication: true, + mem_pool_prune_interval: 4096, + } + } +} + +impl Config { + /// Returns a default `Config` suitable for tests, along with a `TempDir` which must be kept + /// alive for the duration of the test since its destructor removes the dir from the filesystem. + #[cfg(test)] + pub(crate) fn default_for_tests() -> (Self, TempDir) { + let tempdir = tempfile::tempdir().expect("should get tempdir"); + let path = tempdir.path().join("lmdb"); + + let config = Config { + path, + ..Default::default() + }; + (config, tempdir) + } +}
diff --git a/node/src/components/storage/deploy_metadata_v1.rs b/node/src/components/storage/deploy_metadata_v1.rs index 70aa1a2393..84eec56390 100644 --- a/node/src/components/storage/deploy_metadata_v1.rs +++ b/node/src/components/storage/deploy_metadata_v1.rs
@@ -2,12 +2,31 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use casper_types::{execution::ExecutionResultV1, BlockHash}; +use casper_types::{ + execution::{ExecutionResult, ExecutionResultV1}, + BlockHash, +}; /// Version 1 metadata related to a single deploy prior to `casper-node` v2.0.0. #[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq, Eq)] pub(super) struct DeployMetadataV1 { - /// The block hashes of blocks containing the related deploy, along with the results of - /// executing the related deploy in the context of one or more blocks. + /// The hash of the single block containing the related deploy, along with the results of + /// executing it. + /// + /// For historical reasons, this was implemented as a map, despite the guarantee that there + /// will only ever be a single entry. pub(super) execution_results: HashMap, } + +impl From for ExecutionResult { + fn from(v1_results: DeployMetadataV1) -> Self { + let v1_result = v1_results + .execution_results + .into_iter() + .next() + // Safe to unwrap as it's guaranteed to contain exactly one entry. + .expect("must be exactly one result") + .1; + ExecutionResult::V1(v1_result) + } +}
diff --git a/node/src/components/storage/error.rs b/node/src/components/storage/error.rs index 44829eab2f..496ae1ace2 100644 --- a/node/src/components/storage/error.rs +++ b/node/src/components/storage/error.rs
@@ -9,6 +9,7 @@ use casper_types::{ }; use super::lmdb_ext::LmdbExtError; +use crate::types::VariantMismatch; /// A fatal storage component error. ///
@@ -39,11 +40,11 @@ pub enum FatalStorageError { /// Second block hash encountered at `era_id`. second: BlockHash, }, - /// Found a duplicate switch-block-at-era-id index entry. - #[error("duplicate entries for blocks for deploy {deploy_hash}: {first} / {second}")] - DuplicateDeployIndex { - /// Deploy hash at which duplicate was found. - deploy_hash: DeployHash, + /// Found a duplicate transaction index entry. + #[error("duplicate block entries for transaction {transaction_hash}: {first} / {second}")] + DuplicateTransactionIndex { + /// Transaction hash at which duplicate was found.
+ transaction_hash: TransactionHash, /// First block hash encountered at `deploy_hash`. first: BlockHashAndHeight, /// Second block hash encountered at `deploy_hash`. @@ -213,8 +214,3 @@ pub(super) enum GetRequestError { finality_signature: Box, }, } - -/// The variants in the given types are expected to all be the same. -#[derive(Debug, Error)] -#[error("mismatch in variants: {0:?}")] -pub struct VariantMismatch(pub(super) Box); diff --git a/node/src/components/storage/event.rs b/node/src/components/storage/event.rs new file mode 100644 index 0000000000..93048d25bc --- /dev/null +++ b/node/src/components/storage/event.rs @@ -0,0 +1,65 @@ +use std::{ + fmt::{self, Display, Formatter}, + mem, +}; + +use derive_more::From; +use serde::Serialize; +use static_assertions::const_assert; + +use crate::effect::{ + incoming::NetRequestIncoming, + requests::{MakeBlockExecutableRequest, MarkBlockCompletedRequest, StorageRequest}, +}; + +const _STORAGE_EVENT_SIZE: usize = mem::size_of::(); +const_assert!(_STORAGE_EVENT_SIZE <= 32); + +/// A storage component event. +#[derive(Debug, From, Serialize)] +#[repr(u8)] +pub(crate) enum Event { + /// Storage request. + #[from] + StorageRequest(Box), + /// Incoming net request. + NetRequestIncoming(Box), + /// Mark block completed request. + #[from] + MarkBlockCompletedRequest(MarkBlockCompletedRequest), + /// Make block executable request. + #[from] + MakeBlockExecutableRequest(Box), +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::StorageRequest(req) => req.fmt(f), + Event::NetRequestIncoming(incoming) => incoming.fmt(f), + Event::MarkBlockCompletedRequest(req) => req.fmt(f), + Event::MakeBlockExecutableRequest(req) => req.fmt(f), + } + } +} + +impl From for Event { + #[inline] + fn from(incoming: NetRequestIncoming) -> Self { + Event::NetRequestIncoming(Box::new(incoming)) + } +} + +impl From for Event { + #[inline] + fn from(request: StorageRequest) -> Self { + Event::StorageRequest(Box::new(request)) + } +} + +impl From for Event { + #[inline] + fn from(request: MakeBlockExecutableRequest) -> Self { + Event::MakeBlockExecutableRequest(Box::new(request)) + } +} diff --git a/node/src/components/storage/indices.rs b/node/src/components/storage/indices.rs index a1eeb5242a..cf92f59ae8 100644 --- a/node/src/components/storage/indices.rs +++ b/node/src/components/storage/indices.rs @@ -1,6 +1,6 @@ use std::collections::{btree_map::Entry, BTreeMap}; -use casper_types::{BlockHash, BlockHashAndHeight, BlockHeader, DeployHash, EraId}; +use casper_types::{BlockHash, BlockHashAndHeight, BlockHeader, EraId, TransactionHash}; use super::{FatalStorageError, Storage}; @@ -48,31 +48,28 @@ impl Storage { /// Inserts the relevant entries to the index. /// /// If a duplicate entry is encountered, index is not updated and an error is returned. - /// Inserts the relevant entries to the index. - /// - /// If a duplicate entry is encountered, index is not updated and an error is returned. 
- pub(crate) fn insert_to_deploy_index<'a>( - deploy_hash_index: &mut BTreeMap, + pub(crate) fn insert_to_transaction_index( + transaction_hash_index: &mut BTreeMap, block_hash: BlockHash, block_height: u64, - deploy_hash_iter: impl Iterator + Clone, + transaction_hashes: Vec, ) -> Result<(), FatalStorageError> { - if let Some(hash) = deploy_hash_iter.clone().find(|hash| { - deploy_hash_index + if let Some(hash) = transaction_hashes.iter().find(|hash| { + transaction_hash_index .get(hash) .map_or(false, |old_block_hash_and_height| { *old_block_hash_and_height.block_hash() != block_hash }) }) { - return Err(FatalStorageError::DuplicateDeployIndex { - deploy_hash: *hash, - first: deploy_hash_index[hash], + return Err(FatalStorageError::DuplicateTransactionIndex { + transaction_hash: *hash, + first: transaction_hash_index[hash], second: BlockHashAndHeight::new(block_hash, block_height), }); } - for hash in deploy_hash_iter { - deploy_hash_index.insert(*hash, BlockHashAndHeight::new(block_hash, block_height)); + for hash in transaction_hashes { + transaction_hash_index.insert(hash, BlockHashAndHeight::new(block_hash, block_height)); } Ok(()) diff --git a/node/src/components/storage/lmdb_ext.rs b/node/src/components/storage/lmdb_ext.rs index a911c82506..301ed0c02b 100644 --- a/node/src/components/storage/lmdb_ext.rs +++ b/node/src/components/storage/lmdb_ext.rs @@ -100,10 +100,8 @@ pub(super) trait TransactionExt { ) -> Result, LmdbExtError>; /// Returns `true` if the given key has an entry in the given database. - fn value_exists<'a, K, D>(&'a mut self, lookup_dbs: D, key: &K) -> Result - where - K: AsRef<[u8]>, - D: IntoIterator; + fn value_exists>(&mut self, db: Database, key: &K) + -> Result; /// Helper function to load a value from a database using the `bytesrepr` `ToBytes`/`FromBytes` /// serialization. 
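Reviewer note (illustrative sketch, not part of the patch): `value_exists` now probes exactly one `Database`; lookups that previously scanned a slice of databases go through `VersionedDatabases::exists`, which checks the `current` DB first and falls back to the legacy DB only when the key has a legacy form. Assuming a `storage: &Storage` carrying the `transaction_dbs` pair from this diff, a presence check looks roughly like:

fn transaction_is_stored(
    storage: &Storage,
    transaction_hash: &TransactionHash,
) -> Result<bool, FatalStorageError> {
    // Sketch only: mirrors the call pattern established by this change.
    let mut txn = storage.env.begin_ro_txn()?;
    // `exists` checks the bytesrepr-keyed `current` DB first; for a
    // `TransactionHash::Deploy` key it also probes the legacy deploy DB,
    // while `TransactionHash::V1` can never live in the legacy DB.
    storage
        .transaction_dbs
        .exists(&mut txn, transaction_hash)
        .map_err(Into::into)
}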
@@ -169,19 +167,16 @@ where } #[inline] - fn value_exists<'a, K, D>(&'a mut self, lookup_dbs: D, key: &K) -> Result - where - K: AsRef<[u8]>, - D: IntoIterator, - { - for db in lookup_dbs.into_iter() { - match self.get(*db, key) { - Ok(_raw) => return Ok(true), - Err(lmdb::Error::NotFound) => continue, - Err(err) => return Err(err.into()), - } + fn value_exists>( + &mut self, + db: Database, + key: &K, + ) -> Result { + match self.get(db, key) { + Ok(_raw) => Ok(true), + Err(lmdb::Error::NotFound) => Ok(false), + Err(err) => Err(err.into()), } - Ok(false) } #[inline] diff --git a/node/src/components/storage/tests.rs b/node/src/components/storage/tests.rs index f7f3005560..78d105fd10 100644 --- a/node/src/components/storage/tests.rs +++ b/node/src/components/storage/tests.rs @@ -22,7 +22,7 @@ use casper_types::{ AccessRights, Block, BlockHash, BlockHashAndHeight, BlockHeader, BlockSignatures, BlockV2, Chainspec, ChainspecRawBytes, Deploy, DeployApprovalsHash, DeployHash, Digest, EraId, FinalitySignature, ProtocolVersion, PublicKey, SecretKey, SignedBlockHeader, TestBlockBuilder, - TestBlockV1Builder, TimeDiff, Transaction, Transfer, URef, U512, + TestBlockV1Builder, TimeDiff, Transaction, TransactionHash, Transfer, URef, U512, }; use tempfile::tempdir; @@ -43,8 +43,8 @@ use crate::{ testing::{ComponentHarness, UnitTestEvent}, types::{ sync_leap_validation_metadata::SyncLeapValidationMetaData, ApprovalsHashes, - AvailableBlockRange, DeployExecutionInfo, DeployWithFinalizedApprovals, LegacyDeploy, - SignedBlock, SyncLeapIdentifier, + AvailableBlockRange, ExecutionInfo, LegacyDeploy, SignedBlock, SyncLeapIdentifier, + TransactionWithFinalizedApprovals, }, utils::{Loadable, WithDir}, }; @@ -138,7 +138,7 @@ fn create_sync_leap_test_chain( blocks.push(block); }); blocks.iter().for_each(|block| { - storage.write_block(block).unwrap(); + storage.put_block(block).unwrap(); let fs = FinalitySignature::create(*block.hash(), block.era_id(), &validator_secret_key); assert!(fs.is_verified().is_ok()); @@ -340,17 +340,17 @@ fn get_block_signatures(storage: &mut Storage, block_hash: BlockHash) -> Option< storage.get_block_signatures(&mut txn, &block_hash).unwrap() } -/// Loads a set of deploys from a storage component. +/// Loads a set of `Transaction`s from a storage component. /// -/// Applies `into_naive` to all loaded deploys. -fn get_naive_deploys( +/// Applies `into_naive` to all loaded `Transaction`s. +fn get_naive_transactions( harness: &mut ComponentHarness, storage: &mut Storage, - deploy_hashes: Multiple, -) -> Vec> { + transaction_hashes: Multiple, +) -> Vec> { let response = harness.send_request(storage, move |responder| { - StorageRequest::GetDeploys { - deploy_hashes: deploy_hashes.to_vec(), + StorageRequest::GetTransactions { + transaction_hashes: transaction_hashes.to_vec(), responder, } .into() @@ -358,28 +358,28 @@ fn get_naive_deploys( assert!(harness.is_idle()); response .into_iter() - .map(|opt_dfa| opt_dfa.map(DeployWithFinalizedApprovals::into_naive)) + .map(|opt_twfa| opt_twfa.map(TransactionWithFinalizedApprovals::into_naive)) .collect() } /// Loads a deploy with associated execution info from the storage component. /// /// Any potential finalized approvals are discarded. 
-fn get_naive_deploy_and_execution_info( +fn get_naive_transaction_and_execution_info( harness: &mut ComponentHarness, storage: &mut Storage, - deploy_hash: DeployHash, -) -> Option<(Deploy, Option)> { + transaction_hash: TransactionHash, +) -> Option<(Transaction, Option)> { let response = harness.send_request(storage, |responder| { - StorageRequest::GetDeployAndExecutionInfo { - deploy_hash, + StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash, responder, } .into() }); assert!(harness.is_idle()); - response.map(|(deploy_with_finalized_approvals, exec_info)| { - (deploy_with_finalized_approvals.into_naive(), exec_info) + response.map(|(txn_with_finalized_approvals, exec_info)| { + (txn_with_finalized_approvals.into_naive(), exec_info) }) } @@ -480,13 +480,13 @@ fn put_block_signatures( response } -/// Stores a deploy in a storage component. -fn put_deploy( +/// Stores a `Transaction` in a storage component. +fn put_transaction( harness: &mut ComponentHarness, storage: &mut Storage, - deploy: &Deploy, + transaction: &Transaction, ) -> bool { - let transaction = Arc::new(Transaction::from(deploy.clone())); + let transaction = Arc::new(transaction.clone()); let response = harness.send_request(storage, move |responder| { StorageRequest::PutTransaction { transaction, @@ -498,14 +498,14 @@ fn put_deploy( response } -fn insert_to_deploy_index( +fn insert_to_transaction_index( storage: &mut Storage, - deploy: Deploy, + transaction: Transaction, block_hash_and_height: BlockHashAndHeight, ) -> bool { storage - .deploy_hash_index - .insert(*deploy.hash(), block_hash_and_height) + .transaction_hash_index + .insert(transaction.hash(), block_hash_and_height) .is_none() } @@ -908,75 +908,76 @@ fn different_block_at_height_is_fatal() { } #[test] -fn get_vec_of_non_existing_deploy_returns_nones() { +fn get_vec_of_non_existing_transaction_returns_nones() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let deploy_id = DeployHash::random(&mut harness.rng); - let response = get_naive_deploys(&mut harness, &mut storage, smallvec![deploy_id]); + let transaction_id = Transaction::random(&mut harness.rng).hash(); + let response = get_naive_transactions(&mut harness, &mut storage, smallvec![transaction_id]); assert_eq!(response, vec![None]); - // Also verify that we can retrieve using an empty set of deploy hashes. - let response = get_naive_deploys(&mut harness, &mut storage, smallvec![]); + // Also verify that we can retrieve using an empty set of transaction hashes. + let response = get_naive_transactions(&mut harness, &mut storage, smallvec![]); assert!(response.is_empty()); } #[test] -fn can_retrieve_store_and_load_deploys() { +fn can_retrieve_store_and_load_transactions() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); // Create a random deploy, store and load it. - let deploy = Deploy::random(&mut harness.rng); + let transaction = Transaction::random(&mut harness.rng); - let was_new = put_deploy(&mut harness, &mut storage, &deploy); + let was_new = put_transaction(&mut harness, &mut storage, &transaction); let block_hash_and_height = BlockHashAndHeight::random(&mut harness.rng); // Insert to the deploy hash index as well so that we can perform the GET later. // Also check that we don't have an entry there for this deploy. 
- assert!(insert_to_deploy_index( + assert!(insert_to_transaction_index( &mut storage, - deploy.clone(), + transaction.clone(), block_hash_and_height )); - assert!(was_new, "putting deploy should have returned `true`"); + assert!(was_new, "putting transaction should have returned `true`"); // Storing the same deploy again should work, but yield a result of `false`. - let was_new_second_time = put_deploy(&mut harness, &mut storage, &deploy); + let was_new_second_time = put_transaction(&mut harness, &mut storage, &transaction); assert!( !was_new_second_time, - "storing deploy the second time should have returned `false`" + "storing transaction the second time should have returned `false`" ); - assert!(!insert_to_deploy_index( + assert!(!insert_to_transaction_index( &mut storage, - deploy.clone(), + transaction.clone(), block_hash_and_height )); - // Retrieve the stored deploy. - let response = get_naive_deploys(&mut harness, &mut storage, smallvec![*deploy.hash()]); - assert_eq!(response, vec![Some(deploy.clone())]); + // Retrieve the stored transaction. + let response = + get_naive_transactions(&mut harness, &mut storage, smallvec![transaction.hash()]); + assert_eq!(response, vec![Some(transaction.clone())]); // Finally try to get the execution info as well. Since we did not store any, we expect to get // the block hash and height from the indices. - let (deploy_response, exec_info_response) = harness + let (transaction_response, exec_info_response) = harness .send_request(&mut storage, |responder| { - StorageRequest::GetDeployAndExecutionInfo { - deploy_hash: *deploy.hash(), + StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash: transaction.hash(), responder, } .into() }) - .expect("no deploy with execution info returned"); + .expect("no transaction with execution info returned"); - assert_eq!(deploy_response.into_naive(), deploy); + assert_eq!(transaction_response.into_naive(), transaction); match exec_info_response { - Some(DeployExecutionInfo { + Some(ExecutionInfo { execution_result: Some(_), .. }) => { panic!("We didn't store any execution info but we received it in the response.") } - Some(DeployExecutionInfo { + Some(ExecutionInfo { block_hash, block_height, execution_result: None, @@ -991,23 +992,23 @@ fn can_retrieve_store_and_load_deploys() { ), } - // Create a random deploy, store and load it. - let deploy = Deploy::random(&mut harness.rng); + // Create a random transaction, store and load it. + let transaction = Transaction::random(&mut harness.rng); - assert!(put_deploy(&mut harness, &mut storage, &deploy)); - // Don't insert to the deploy hash index. Since we have no execution results + assert!(put_transaction(&mut harness, &mut storage, &transaction)); + // Don't insert to the transaction hash index. Since we have no execution results // either, we should receive a `None` execution info response. 
- let (deploy_response, exec_info_response) = harness + let (transaction_response, exec_info_response) = harness .send_request(&mut storage, |responder| { - StorageRequest::GetDeployAndExecutionInfo { - deploy_hash: *deploy.hash(), + StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash: transaction.hash(), responder, } .into() }) - .expect("no deploy with execution info returned"); + .expect("no transaction with execution info returned"); - assert_eq!(deploy_response.into_naive(), deploy); + assert_eq!(transaction_response.into_naive(), transaction); assert!( exec_info_response.is_none(), "We didn't store any block info in the index but we received it in the response." @@ -1015,27 +1016,28 @@ fn can_retrieve_store_and_load_deploys() { } #[test] -fn storing_and_loading_a_lot_of_deploys_does_not_exhaust_handles() { +fn storing_and_loading_a_lot_of_transactions_does_not_exhaust_handles() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); let total = 1000; let batch_size = 25; - let mut deploy_hashes = Vec::new(); + let mut transaction_hashes = Vec::new(); for _ in 0..total { - let deploy = Deploy::random(&mut harness.rng); - deploy_hashes.push(*deploy.hash()); - put_deploy(&mut harness, &mut storage, &deploy); + let transaction = Transaction::random(&mut harness.rng); + transaction_hashes.push(transaction.hash()); + put_transaction(&mut harness, &mut storage, &transaction); } - // Shuffle deploy hashes around to get a random order. - deploy_hashes.as_mut_slice().shuffle(&mut harness.rng); + // Shuffle transaction hashes around to get a random order. + transaction_hashes.as_mut_slice().shuffle(&mut harness.rng); // Retrieve all from storage, ensuring they are found. - for chunk in deploy_hashes.chunks(batch_size) { - let result = get_naive_deploys(&mut harness, &mut storage, chunk.iter().cloned().collect()); + for chunk in transaction_hashes.chunks(batch_size) { + let result = + get_naive_transactions(&mut harness, &mut storage, chunk.iter().cloned().collect()); assert!(result.iter().all(Option::is_some)); } } @@ -1055,7 +1057,7 @@ fn store_random_execution_results() { fn setup_block( harness: &mut ComponentHarness, storage: &mut Storage, - expected_outcome: &mut HashMap, + expected_outcome: &mut HashMap, block_hash: &BlockHash, block_height: u64, ) { @@ -1069,11 +1071,11 @@ fn store_random_execution_results() { let deploy = Deploy::random(&mut harness.rng); // Store deploy. - put_deploy(harness, storage, &deploy); + put_transaction(harness, storage, &Transaction::from(deploy.clone())); let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); - let execution_info = DeployExecutionInfo { + let execution_info = ExecutionInfo { block_hash: *block_hash, block_height, execution_result: Some(execution_result.clone()), @@ -1109,11 +1111,12 @@ fn store_random_execution_results() { // At this point, we are all set up and ready to receive results. Iterate over every deploy and // see if its execution-data-per-block matches our expectations. 
for (deploy_hash, expected_exec_info) in expected_outcome.into_iter() { - let (deploy, maybe_exec_info) = - get_naive_deploy_and_execution_info(&mut harness, &mut storage, deploy_hash) - .expect("missing deploy"); + let transaction_hash = TransactionHash::from(deploy_hash); + let (transaction, maybe_exec_info) = + get_naive_transaction_and_execution_info(&mut harness, &mut storage, transaction_hash) + .expect("missing transaction"); - assert_eq!(deploy_hash, *deploy.hash()); + assert_eq!(transaction_hash, transaction.hash()); assert_eq!(maybe_exec_info, Some(expected_exec_info)); } } @@ -1128,7 +1131,11 @@ fn store_execution_results_twice_for_same_block_deploy_pair() { let deploy = Deploy::random(&mut harness.rng); let deploy_hash = *deploy.hash(); - put_deploy(&mut harness, &mut storage, &deploy); + put_transaction( + &mut harness, + &mut storage, + &Transaction::from(deploy.clone()), + ); let mut exec_result_1 = HashMap::new(); exec_result_1.insert( @@ -1158,16 +1165,19 @@ fn store_execution_results_twice_for_same_block_deploy_pair() { exec_result_2, ); - let (returned_deploy, returned_exec_info) = - get_naive_deploy_and_execution_info(&mut harness, &mut storage, deploy_hash) - .expect("missing deploy"); - let expected_exec_info = Some(DeployExecutionInfo { + let (returned_transaction, returned_exec_info) = get_naive_transaction_and_execution_info( + &mut harness, + &mut storage, + TransactionHash::from(deploy_hash), + ) + .expect("missing deploy"); + let expected_exec_info = Some(ExecutionInfo { block_hash, block_height, execution_result: Some(new_exec_result), }); - assert_eq!(returned_deploy, deploy); + assert_eq!(returned_transaction, Transaction::from(deploy)); assert_eq!(returned_exec_info, expected_exec_info); } @@ -1213,7 +1223,11 @@ fn persist_blocks_deploys_and_execution_info_across_instantiations() { let block_height = block.height(); let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); - put_deploy(&mut harness, &mut storage, &deploy); + put_transaction( + &mut harness, + &mut storage, + &Transaction::from(deploy.clone()), + ); put_complete_block(&mut harness, &mut storage, block.clone()); let mut execution_results = HashMap::new(); execution_results.insert(*deploy.hash(), execution_result.clone()); @@ -1241,12 +1255,22 @@ fn persist_blocks_deploys_and_execution_info_across_instantiations() { let actual_block = get_block(&mut harness, &mut storage, *block.hash()) .expect("missing block we stored earlier"); assert_eq!(actual_block, block); - let actual_deploys = get_naive_deploys(&mut harness, &mut storage, smallvec![*deploy.hash()]); - assert_eq!(actual_deploys, vec![Some(deploy.clone())]); + let actual_deploys = get_naive_transactions( + &mut harness, + &mut storage, + smallvec![TransactionHash::from(*deploy.hash())], + ); + assert_eq!( + actual_deploys, + vec![Some(Transaction::from(deploy.clone()))] + ); - let (_, maybe_exec_info) = - get_naive_deploy_and_execution_info(&mut harness, &mut storage, *deploy.hash()) - .expect("missing deploy we stored earlier"); + let (_, maybe_exec_info) = get_naive_transaction_and_execution_info( + &mut harness, + &mut storage, + TransactionHash::from(*deploy.hash()), + ) + .expect("missing deploy we stored earlier"); let retrieved_execution_result = maybe_exec_info .expect("should have execution info") @@ -1315,7 +1339,11 @@ fn should_hard_reset() { { let deploy = random_deploys.get(index).expect("should have deploys"); let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut 
harness.rng)); - put_deploy(&mut harness, &mut storage, deploy); + put_transaction( + &mut harness, + &mut storage, + &Transaction::from(deploy.clone()), + ); let mut exec_results = HashMap::new(); exec_results.insert(*deploy.hash(), execution_result); put_execution_results( @@ -1368,12 +1396,15 @@ fn should_hard_reset() { // Check execution results in deleted blocks have been removed. for (index, deploy) in deploys.iter().enumerate() { - let (_, maybe_exec_info) = - get_naive_deploy_and_execution_info(&mut harness, &mut storage, *deploy.hash()) - .unwrap(); + let (_, maybe_exec_info) = get_naive_transaction_and_execution_info( + &mut harness, + &mut storage, + TransactionHash::from(*deploy.hash()), + ) + .unwrap(); let should_have_exec_results = index < blocks_per_era * reset_era; match maybe_exec_info { - Some(DeployExecutionInfo { + Some(ExecutionInfo { execution_result, .. }) => { assert_eq!(should_have_exec_results, execution_result.is_some()); @@ -1759,7 +1790,7 @@ fn should_restrict_returned_blocks() { .switch_block(false) .build_versioned(&mut harness.rng); - storage.write_block(&block).unwrap(); + storage.put_block(&block).unwrap(); storage.completed_blocks.insert(height); }); diff --git a/node/src/components/storage/versioned_databases.rs b/node/src/components/storage/versioned_databases.rs new file mode 100644 index 0000000000..5b0d4855e3 --- /dev/null +++ b/node/src/components/storage/versioned_databases.rs @@ -0,0 +1,557 @@ +#[cfg(test)] +use std::{cmp::Ord, collections::BTreeSet}; + +use datasize::DataSize; +use lmdb::{ + Cursor, Database, DatabaseFlags, Environment, RwCursor, RwTransaction, + Transaction as LmdbTransaction, +}; +use serde::de::DeserializeOwned; +#[cfg(test)] +use serde::Serialize; +use std::marker::PhantomData; + +#[cfg(test)] +use casper_types::bytesrepr; +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + execution::ExecutionResult, + BlockBody, BlockBodyV1, BlockHash, BlockHeader, BlockHeaderV1, Deploy, DeployHash, Digest, + Transaction, TransactionHash, +}; + +use super::{ + lmdb_ext::{self, LmdbExtError, TransactionExt, WriteTransactionExt}, + DeployMetadataV1, FatalStorageError, +}; +use crate::types::{FinalizedApprovals, FinalizedDeployApprovals}; + +pub(super) trait VersionedKey: ToBytes { + type Legacy: AsRef<[u8]>; + + fn legacy_key(&self) -> Option<&Self::Legacy>; +} + +pub(super) trait VersionedValue: ToBytes + FromBytes { + type Legacy: 'static + DeserializeOwned + Into; +} + +impl VersionedKey for TransactionHash { + type Legacy = DeployHash; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + match self { + TransactionHash::Deploy(deploy_hash) => Some(deploy_hash), + TransactionHash::V1(_) => None, + } + } +} + +impl VersionedKey for BlockHash { + type Legacy = BlockHash; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + Some(self) + } +} + +impl VersionedKey for Digest { + type Legacy = Digest; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + Some(self) + } +} + +impl VersionedValue for Transaction { + type Legacy = Deploy; +} + +impl VersionedValue for BlockHeader { + type Legacy = BlockHeaderV1; +} + +impl VersionedValue for BlockBody { + type Legacy = BlockBodyV1; +} + +impl VersionedValue for ExecutionResult { + type Legacy = DeployMetadataV1; +} + +impl VersionedValue for FinalizedApprovals { + type Legacy = FinalizedDeployApprovals; +} + +/// A pair of databases, one holding the original legacy form of the data, and the other holding the +/// new versioned, future-proof form of the data. 
+/// +/// Specific entries should generally not be repeated - they will either be held in the legacy or +/// the current DB, but not both. Data is not migrated from legacy to current, but newly-stored +/// data will always be written to the current DB, even if it is of the type `V::Legacy`. +/// +/// Exceptions to this can occur if a pre-existing legacy entry is re-stored, in which case there +/// will be a duplicated entry in the `legacy` and `current` DBs. This should not be a common +/// occurrence though. +#[derive(Eq, PartialEq, DataSize, Debug)] +pub(super) struct VersionedDatabases { + /// Legacy form of the data, with the key as `K::Legacy` type (converted to bytes using + /// `AsRef<[u8]>`) and the value bincode-encoded. + #[data_size(skip)] + legacy: Database, + /// Current form of the data, with the key as `K` bytesrepr-encoded and the value as `V` also + /// bytesrepr-encoded. + #[data_size(skip)] + current: Database, + _phantom: PhantomData<(K, V)>, +} +
+impl Clone for VersionedDatabases { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for VersionedDatabases {} +
+impl VersionedDatabases +where + K: VersionedKey, + V: VersionedValue, +{ + pub(super) fn new( + env: &Environment, + legacy_name: &str, + current_name: &str, + ) -> Result { + Ok(VersionedDatabases { + legacy: env.create_db(Some(legacy_name), DatabaseFlags::empty())?, + current: env.create_db(Some(current_name), DatabaseFlags::empty())?, + _phantom: PhantomData, + }) + } +
+ pub(super) fn put( + &self, + txn: &mut RwTransaction, + key: &K, + value: &V, + overwrite: bool, + ) -> Result { + txn.put_value_bytesrepr(self.current, key, value, overwrite) + } +
+ pub(super) fn get( + &self, + txn: &mut Tx, + key: &K, + ) -> Result, LmdbExtError> { + if let Some(value) = txn.get_value_bytesrepr(self.current, key)? { + return Ok(Some(value)); + } + + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return Ok(None), + }; + + Ok(txn + .get_value::<_, V::Legacy>(self.legacy, legacy_key)? + .map(Into::into)) + } +
+ pub(super) fn exists( + &self, + txn: &mut Tx, + key: &K, + ) -> Result { + if txn.value_exists_bytesrepr(self.current, key)? { + return Ok(true); + } + + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return Ok(false), + }; + + txn.value_exists(self.legacy, legacy_key) + } +
+ pub(super) fn delete(&self, txn: &mut RwTransaction, key: &K) -> Result<(), LmdbExtError> { + let serialized_key = lmdb_ext::serialize_bytesrepr(key)?; + let current_result = txn.del(self.current, &serialized_key, None); + // Avoid returning early when `current_result` is `Ok`, since some + // `VersionedDatabases` could possibly have the same entry in both DBs. + + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return current_result.map_err(Into::into), + }; + + let legacy_result = txn.del(self.legacy, legacy_key, None).map_err(Into::into); + + if current_result.is_ok() || legacy_result.is_ok() { + return Ok(()); + } + + legacy_result + } +
+ /// Iterates every row in the current database, deserializing the value and calling `f` with the + /// cursor and the parsed value.
+
+    /// Iterates every row in the current database, deserializing the value and calling `f` with the
+    /// cursor and the parsed value.
+    pub(super) fn for_each_value_in_current<'a, F>(
+        &self,
+        txn: &'a mut RwTransaction,
+        f: &mut F,
+    ) -> Result<(), FatalStorageError>
+    where
+        F: FnMut(&mut RwCursor<'a>, V) -> Result<(), FatalStorageError>,
+    {
+        let mut cursor = txn.open_rw_cursor(self.current)?;
+        for row in cursor.iter() {
+            let (_, raw_val) = row?;
+            let value: V = lmdb_ext::deserialize_bytesrepr(raw_val)?;
+            f(&mut cursor, value)?;
+        }
+        Ok(())
+    }
+
+    /// Iterates every row in the legacy database, deserializing the value and calling `f` with the
+    /// cursor and the parsed value.
+    pub(super) fn for_each_value_in_legacy<'a, F>(
+        &self,
+        txn: &'a mut RwTransaction,
+        f: &mut F,
+    ) -> Result<(), FatalStorageError>
+    where
+        F: FnMut(&mut RwCursor<'a>, V) -> Result<(), FatalStorageError>,
+    {
+        let mut cursor = txn.open_rw_cursor(self.legacy)?;
+        for row in cursor.iter() {
+            let (_, raw_val) = row?;
+            let value: V::Legacy = lmdb_ext::deserialize(raw_val)?;
+            f(&mut cursor, value.into())?;
+        }
+        Ok(())
+    }
+
+    /// Writes to the `legacy` database.
+    #[cfg(test)]
+    pub(super) fn put_legacy(
+        &self,
+        txn: &mut RwTransaction,
+        legacy_key: &K::Legacy,
+        legacy_value: &V::Legacy,
+        overwrite: bool,
+    ) -> bool
+    where
+        V::Legacy: Serialize,
+    {
+        txn.put_value(self.legacy, legacy_key, legacy_value, overwrite)
+            .expect("should put legacy value")
+    }
+
+    /// Returns the keys from the `current` database only.
+    #[cfg(test)]
+    pub(super) fn keys<Tx: LmdbTransaction>(&self, txn: &Tx) -> BTreeSet<K>
+    where
+        K: Ord + FromBytes,
+    {
+        let mut cursor = txn
+            .open_ro_cursor(self.current)
+            .expect("should create cursor");
+
+        cursor
+            .iter()
+            .map(Result::unwrap)
+            .map(|(raw_key, _)| {
+                bytesrepr::deserialize(raw_key.to_vec()).expect("malformed key in DB")
+            })
+            .collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use lmdb::WriteFlags;
+    use std::collections::HashMap;
+
+    use tempfile::TempDir;
+
+    use casper_types::testing::TestRng;
+
+    use super::*;
+
+    struct Fixture {
+        rng: TestRng,
+        env: Environment,
+        dbs: VersionedDatabases<TransactionHash, Transaction>,
+        random_transactions: HashMap<TransactionHash, Transaction>,
+        legacy_transactions: HashMap<DeployHash, Deploy>,
+        _data_dir: TempDir,
+    }
+
+    impl Fixture {
+        fn new() -> Fixture {
+            let rng = TestRng::new();
+            let data_dir = TempDir::new().expect("should create temp dir");
+            let env = super::super::new_environment(1024 * 1024, data_dir.path()).unwrap();
+            let dbs = VersionedDatabases::new(&env, "legacy", "current").unwrap();
+            let mut fixture = Fixture {
+                rng,
+                env,
+                dbs,
+                random_transactions: HashMap::new(),
+                legacy_transactions: HashMap::new(),
+                _data_dir: data_dir,
+            };
+            for _ in 0..3 {
+                let transaction = Transaction::random(&mut fixture.rng);
+                assert!(fixture
+                    .random_transactions
+                    .insert(transaction.hash(), transaction)
+                    .is_none());
+                let deploy = Deploy::random(&mut fixture.rng);
+                assert!(fixture
+                    .legacy_transactions
+                    .insert(*deploy.hash(), deploy)
+                    .is_none());
+            }
+            fixture
+        }
+    }
+
+    #[test]
+    fn should_put() {
+        let fixture = Fixture::new();
+        let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap();
+
+        // Should return `true` on first `put`.
+        let mut txn = fixture.env.begin_rw_txn().unwrap();
+        assert!(fixture
+            .dbs
+            .put(&mut txn, transaction_hash, transaction, true)
+            .unwrap());
+
+        // Should return `false` on duplicate `put` if not set to overwrite.
+        assert!(!fixture
+            .dbs
+            .put(&mut txn, transaction_hash, transaction, false)
+            .unwrap());
+
+        // Should return `true` on duplicate `put` if set to overwrite.
+ assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + } + + #[test] + fn should_get() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + txn.commit().unwrap(); + + // Should get the deploy. + let mut txn = fixture.env.begin_ro_txn().unwrap(); + assert_eq!( + fixture + .dbs + .get(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap(), + Some(Transaction::from(deploy.clone())) + ); + + // Should get the random transaction. + assert_eq!( + fixture.dbs.get(&mut txn, transaction_hash).unwrap(), + Some(transaction.clone()) + ); + + // Should return `Ok(None)` for non-existent data. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + assert!(fixture.dbs.get(&mut txn, &random_hash).unwrap().is_none()); + } + + #[test] + fn should_exist() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + txn.commit().unwrap(); + + // The deploy should exist. + let mut txn = fixture.env.begin_ro_txn().unwrap(); + assert!(fixture + .dbs + .exists(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + + // The random transaction should exist. + assert!(fixture.dbs.exists(&mut txn, transaction_hash).unwrap()); + + // Random data should not exist. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + assert!(!fixture.dbs.exists(&mut txn, &random_hash).unwrap()); + } + + #[test] + fn should_delete() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + // Also store the legacy deploy in the `current` DB. While being an edge case, we still + // need to ensure that deleting removes both copies of the deploy. + assert!(fixture + .dbs + .put( + &mut txn, + &TransactionHash::from(*deploy_hash), + &Transaction::from(deploy.clone()), + true + ) + .unwrap()); + txn.commit().unwrap(); + + // Should delete the deploy. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .delete(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap(); + assert!(!fixture + .dbs + .exists(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + + // Should delete the random transaction. 
+ fixture.dbs.delete(&mut txn, transaction_hash).unwrap(); + assert!(!fixture.dbs.exists(&mut txn, transaction_hash).unwrap()); + + // Should fail to delete non-existent data. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + assert!(fixture.dbs.delete(&mut txn, &random_hash).is_err()); + } + + #[test] + fn should_iterate_current() { + let fixture = Fixture::new(); + + // Store all random transactions. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + for (transaction_hash, transaction) in fixture.random_transactions.iter() { + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + } + txn.commit().unwrap(); + + // Iterate `current`, deleting each cursor entry and gathering the visited values in a map. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + let mut visited = HashMap::new(); + let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| { + cursor.del(WriteFlags::empty()).unwrap(); + let _ = visited.insert(transaction.hash(), transaction); + Ok(()) + }; + fixture + .dbs + .for_each_value_in_current(&mut txn, &mut visitor) + .unwrap(); + txn.commit().unwrap(); + + // Ensure all values were visited and the DB doesn't contain them any more. + assert_eq!(visited, fixture.random_transactions); + let mut txn = fixture.env.begin_ro_txn().unwrap(); + for transaction_hash in fixture.random_transactions.keys() { + assert!(!fixture.dbs.exists(&mut txn, transaction_hash).unwrap()); + } + + // Ensure a second run is a no-op. + let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| { + panic!("should never get called"); + }; + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .for_each_value_in_current(&mut txn, &mut visitor) + .unwrap(); + } + + #[test] + fn should_iterate_legacy() { + let fixture = Fixture::new(); + + // Store all legacy transactions. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + for (deploy_hash, deploy) in fixture.legacy_transactions.iter() { + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + } + txn.commit().unwrap(); + + // Iterate `legacy`, deleting each cursor entry and gathering the visited values in a map. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + let mut visited = HashMap::new(); + let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| { + cursor.del(WriteFlags::empty()).unwrap(); + match transaction { + Transaction::Deploy(deploy) => { + let _ = visited.insert(*deploy.hash(), deploy); + } + Transaction::V1(_) => unreachable!(), + } + Ok(()) + }; + fixture + .dbs + .for_each_value_in_legacy(&mut txn, &mut visitor) + .unwrap(); + txn.commit().unwrap(); + + // Ensure all values were visited and the DB doesn't contain them any more. + assert_eq!(visited, fixture.legacy_transactions); + let mut txn = fixture.env.begin_ro_txn().unwrap(); + for deploy_hash in fixture.legacy_transactions.keys() { + assert!(!fixture + .dbs + .exists(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + } + + // Ensure a second run is a no-op. 
+ let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| { + panic!("should never get called"); + }; + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .for_each_value_in_legacy(&mut txn, &mut visitor) + .unwrap(); + } +} diff --git a/node/src/components/storage/write_block.rs b/node/src/components/storage/write_block.rs deleted file mode 100644 index ce731b4ef2..0000000000 --- a/node/src/components/storage/write_block.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{collections::HashMap, rc::Rc}; - -use lmdb::{Database, RwTransaction, Transaction}; - -use casper_types::{ - execution::ExecutionResult, Block, BlockBody, BlockBodyV1, BlockHash, BlockHeader, - BlockHeaderV1, DeployHash, Digest, -}; -use tracing::error; - -use crate::types::ApprovalsHashes; - -use super::{ - lmdb_ext::{LmdbExtError, WriteTransactionExt}, - FatalStorageError, Storage, -}; - -impl Storage { - /// Verifies a block and writes it to a block to storage, updating indices as - /// necessary. This function should only be used by components that deal with historical blocks, - /// for example: `Fetcher`. - /// - /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it - /// couldn't be written because it already existed, and `Err(_)` if there was an error. - #[cfg_attr(doc, aquamarine::aquamarine)] - /// ```mermaid - /// flowchart TD - /// style Start fill:#66ccff,stroke:#333,stroke-width:4px - /// style End fill:#66ccff,stroke:#333,stroke-width:4px - /// style write_block fill:#00FF00,stroke:#333,stroke-width:4px - /// - /// Start --> A[Block fetched] - /// A --> put_block_to_storage - /// put_block_to_storage --> StorageRequest::PutBlock - /// StorageRequest::PutBlock --> write_block - /// write_block --> write_validated_block - /// write_validated_block --> B{"is it a legacy block?
(V1)"} - /// B -->|Yes| put_single_legacy_block_body - /// B -->|No| put_single_block_body - /// put_single_legacy_block_body --> D[store header in DB] - /// put_single_block_body --> D[store header in DB] - /// D --> C[update indices] - /// C --> End - /// ``` - pub fn write_block(&mut self, block: &Block) -> Result { - block.verify()?; - let env = Rc::clone(&self.env); - let mut txn = env.begin_rw_txn()?; - let wrote = self.write_validated_block(&mut txn, block)?; - if wrote { - txn.commit()?; - } - Ok(wrote) - } - - /// Writes a block which has already been verified to storage, updating indices as necessary. - /// - /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it - /// couldn't be written because it already existed, and `Err(_)` if there was an error. - #[cfg_attr(doc, aquamarine::aquamarine)] - /// ```mermaid - /// flowchart TD - /// style Start fill:#66ccff,stroke:#333,stroke-width:4px - /// style End fill:#66ccff,stroke:#333,stroke-width:4px - /// style B fill:#00FF00,stroke:#333,stroke-width:4px - /// - /// Start --> A["Validated block needs to be stored
(might be coming from contract runtime)"] - /// A --> put_executed_block_to_storage - /// put_executed_block_to_storage --> StorageRequest::PutExecutedBlock - /// StorageRequest::PutExecutedBlock --> put_executed_block - /// put_executed_block --> B["write_validated_block
(current version)"] - /// B --> C[convert into BlockBody] - /// C --> put_single_block_body - /// put_single_block_body --> write_block_header - /// write_block_header --> D[update indices] - /// D --> End - /// ``` - fn write_validated_block( - &mut self, - txn: &mut RwTransaction, - block: &Block, - ) -> Result { - // Insert the body: - { - let block_body_hash = block.body_hash(); - match block { - Block::V1(v1) => { - let block_body = v1.body(); - if !Self::put_single_legacy_block_body( - txn, - block_body_hash, - block_body, - self.block_body_dbs.legacy, - )? { - error!("could not insert body for: {}", block); - return Ok(false); - } - } - Block::V2(_) => { - let block_body = block.clone_body(); - if !Self::put_single_block_body( - txn, - block_body_hash, - &block_body, - self.block_body_dbs.current, - )? { - error!("could not insert body for: {}", block); - return Ok(false); - } - } - } - } - - // Insert the header: - { - let block_hash = block.hash(); - match block { - Block::V1(v1) => { - let block_header = v1.header(); - if !Self::put_single_legacy_block_header( - txn, - block_hash, - block_header, - self.block_header_dbs.legacy, - )? { - error!("could not insert header for: {}", block); - return Ok(false); - } - } - Block::V2(_) => { - let block_header = block.clone_header(); - if !Self::put_single_block_header( - txn, - block_hash, - &block_header, - self.block_header_dbs.current, - )? { - error!("could not insert header for: {}", block); - return Ok(false); - } - } - } - } - - { - Self::insert_to_block_header_indices( - &mut self.block_height_index, - &mut self.switch_block_era_id_index, - &block.clone_header(), - )?; - Self::insert_to_deploy_index( - &mut self.deploy_hash_index, - *block.hash(), - block.height(), - block.clone_body().deploy_and_transfer_hashes(), - )?; - } - Ok(true) - } - - /// Writes a single block header in a separate transaction to storage. - fn put_single_legacy_block_header( - txn: &mut RwTransaction, - block_hash: &BlockHash, - block_header: &BlockHeaderV1, - db: Database, - ) -> Result { - txn.put_value(db, block_hash, block_header, true) - .map_err(Into::into) - } - - /// Writes a single block header in a separate transaction to storage. - fn put_single_block_header( - txn: &mut RwTransaction, - block_hash: &BlockHash, - block_header: &BlockHeader, - db: Database, - ) -> Result { - debug_assert!(!matches!(block_header, BlockHeader::V1(_))); - txn.put_value(db, block_hash, block_header, true) - .map_err(Into::into) - } - - /// Writes a single block body in a separate transaction to storage. - fn put_single_legacy_block_body( - txn: &mut RwTransaction, - block_body_hash: &Digest, - block_body: &BlockBodyV1, - db: Database, - ) -> Result { - txn.put_value(db, block_body_hash, block_body, true) - .map_err(Into::into) - } - - /// Writes a single block body in a separate transaction to storage. 
-    fn put_single_block_body(
-        txn: &mut RwTransaction,
-        block_body_hash: &Digest,
-        block_body: &BlockBody,
-        db: Database,
-    ) -> Result<bool, LmdbExtError> {
-        debug_assert!(!matches!(block_body, BlockBody::V1(_)));
-        txn.put_value(db, block_body_hash, block_body, true)
-            .map_err(Into::into)
-    }
-
-    pub(crate) fn put_executed_block(
-        &mut self,
-        block: &Block,
-        approvals_hashes: &ApprovalsHashes,
-        execution_results: HashMap<DeployHash, ExecutionResult>,
-    ) -> Result<bool, FatalStorageError> {
-        let env = Rc::clone(&self.env);
-        let mut txn = env.begin_rw_txn()?;
-        let wrote = self.write_validated_block(&mut txn, block)?;
-        if !wrote {
-            return Err(FatalStorageError::FailedToOverwriteBlock);
-        }
-
-        let _ = self.write_approvals_hashes(&mut txn, approvals_hashes)?;
-        let _ = self.write_execution_results(
-            &mut txn,
-            block.hash(),
-            block.height(),
-            execution_results,
-        )?;
-        txn.commit()?;
-
-        Ok(true)
-    }
-}
diff --git a/node/src/effect.rs b/node/src/effect.rs
index 7797f59e78..c5e27bb1f3 100644
--- a/node/src/effect.rs
+++ b/node/src/effect.rs
@@ -126,7 +126,7 @@ use casper_types::{
     package::Package,
     system::auction::EraValidators,
     AddressableEntity, Block, BlockHash, BlockHeader, BlockSignatures, BlockV2, ChainspecRawBytes,
-    Deploy, DeployHash, DeployHeader, Digest, EraId, FinalitySignature, FinalitySignatureId, Key,
+    DeployHash, DeployHeader, Digest, EraId, FinalitySignature, FinalitySignatureId, Key,
     PublicKey, TimeDiff, Timestamp, Transaction, TransactionHash, TransactionId, Transfer, U512,
 };
@@ -152,9 +152,9 @@ use crate::{
     types::{
         appendable_block::AppendableBlock, ApprovalsHashes, AvailableBlockRange,
         BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, BlockWithMetadata,
-        DeployExecutionInfo, DeployWithFinalizedApprovals, ExecutableBlock, FinalizedApprovals,
-        FinalizedBlock, LegacyDeploy, MetaBlock, MetaBlockState, NodeId, SignedBlock, TrieOrChunk,
-        TrieOrChunkId,
+        ExecutableBlock, ExecutionInfo, FinalizedApprovals, FinalizedBlock, LegacyDeploy,
+        MetaBlock, MetaBlockState, NodeId, SignedBlock, TransactionWithFinalizedApprovals,
+        TrieOrChunk, TrieOrChunkId,
     },
     utils::{fmt_limit::FmtLimit, SharedFlag, Source},
 };
@@ -1483,20 +1483,20 @@ impl<REv> EffectBuilder<REv> {
         .await
     }

-    /// Gets the requested deploys from the deploy store.
+    /// Gets the requested transactions from storage.
     ///
-    /// Returns the "original" deploys, which are the first received by the node, along with a
+    /// Returns the "original" transactions, which are the first received by the node, along with a
     /// potentially different set of approvals used during execution of the recorded block.
-    pub(crate) async fn get_deploys_from_storage(
-        self,
-        deploy_hashes: Vec<DeployHash>,
-    ) -> SmallVec<[Option<Deploy>; 1]>
+    pub(crate) async fn get_transactions_from_storage(
+        self,
+        transaction_hashes: Vec<TransactionHash>,
+    ) -> SmallVec<[Option<Transaction>; 1]>
     where
         REv: From<StorageRequest>,
     {
         self.make_request(
-            |responder| StorageRequest::GetDeploys {
-                deploy_hashes,
+            |responder| StorageRequest::GetTransactions {
+                transaction_hashes,
                 responder,
             },
             QueueKind::FromStorage,
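A hedged fragment showing a call site of the renamed effect (it assumes an `effect_builder` and a `transaction_hash` already in scope, and is not part of the diff):

```rust
// Each returned element is `None` if the corresponding transaction is absent
// from storage; legacy deploys come back wrapped as `Transaction::Deploy`.
let maybe_transactions: SmallVec<[Option<Transaction>; 1]> = effect_builder
    .get_transactions_from_storage(vec![transaction_hash])
    .await;
```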
@@ -1582,17 +1582,17 @@ impl<REv> EffectBuilder<REv> {
         .await
     }

-    /// Gets the requested deploys from the deploy store.
-    pub(crate) async fn get_deploy_and_execution_info_from_storage(
+    /// Gets the requested transaction and associated execution info if available.
+    pub(crate) async fn get_transaction_and_execution_info_from_storage(
         self,
-        deploy_hash: DeployHash,
-    ) -> Option<(DeployWithFinalizedApprovals, Option<DeployExecutionInfo>)>
+        transaction_hash: TransactionHash,
+    ) -> Option<(TransactionWithFinalizedApprovals, Option<ExecutionInfo>)>
     where
         REv: From<StorageRequest>,
     {
         self.make_request(
-            |responder| StorageRequest::GetDeployAndExecutionInfo {
-                deploy_hash,
+            |responder| StorageRequest::GetTransactionAndExecutionInfo {
+                transaction_hash,
                 responder,
             },
             QueueKind::FromStorage,
@@ -2242,20 +2242,20 @@ impl<REv> EffectBuilder<REv> {
         .await
     }

-    /// Requests execution of a single deploy, without commiting its effects.
-    /// Inteded to be used for debugging & discovery purposes.
-    pub(crate) async fn speculative_execute_deploy(
+    /// Requests execution of a single transaction, without committing its effects. Intended to be
+    /// used for debugging & discovery purposes.
+    pub(crate) async fn speculatively_execute(
         self,
         execution_prestate: SpeculativeExecutionState,
-        deploy: Arc<Deploy>,
+        transaction: Box<Transaction>,
     ) -> Result<Option<ExecutionResultV2>, engine_state::Error>
     where
         REv: From<ContractRuntimeRequest>,
     {
         self.make_request(
-            |responder| ContractRuntimeRequest::SpeculativeDeployExecution {
+            |responder| ContractRuntimeRequest::SpeculativelyExecute {
                 execution_prestate,
-                deploy,
+                transaction,
                 responder,
             },
             QueueKind::ContractRuntime,
diff --git a/node/src/effect/requests.rs b/node/src/effect/requests.rs
index 277d5993ca..ab35cbcc92 100644
--- a/node/src/effect/requests.rs
+++ b/node/src/effect/requests.rs
@@ -29,7 +29,7 @@ use casper_types::{
     bytesrepr::Bytes,
     execution::{ExecutionResult, ExecutionResultV2},
     system::auction::EraValidators,
-    Block, BlockHash, BlockHeader, BlockSignatures, BlockV2, ChainspecRawBytes, Deploy, DeployHash,
+    Block, BlockHash, BlockHeader, BlockSignatures, BlockV2, ChainspecRawBytes, DeployHash,
     DeployHeader, Digest, DisplayIter, EraId, FinalitySignature, FinalitySignatureId, Key,
     ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transaction, TransactionHash, TransactionId,
     Transfer, URef, U512,
@@ -60,8 +60,8 @@ use crate::{
     types::{
         appendable_block::AppendableBlock, ApprovalsHashes, AvailableBlockRange,
         BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, BlockWithMetadata,
-        DeployExecutionInfo, DeployWithFinalizedApprovals, ExecutableBlock, FinalizedApprovals,
-        LegacyDeploy, MetaBlockState, NodeId, SignedBlock, StatusFeed, TrieOrChunk, TrieOrChunkId,
+        ExecutableBlock, ExecutionInfo, FinalizedApprovals, LegacyDeploy, MetaBlockState, NodeId,
+        SignedBlock, StatusFeed, TransactionWithFinalizedApprovals, TrieOrChunk, TrieOrChunkId,
     },
     utils::Source,
 };
@@ -369,12 +369,10 @@ pub(crate) enum StorageRequest {
         /// previously stored.
         responder: Responder<bool>,
     },
-    /// Retrieve deploys with given hashes.
-    GetDeploys {
-        /// Hashes of deploys to be retrieved.
-        deploy_hashes: Vec<DeployHash>,
-        /// Responder to call with the results.
-        responder: Responder<SmallVec<[Option<Deploy>; 1]>>,
+    /// Retrieve transactions with given hashes.
+    GetTransactions {
+        transaction_hashes: Vec<TransactionHash>,
+        responder: Responder<SmallVec<[Option<Transaction>; 1]>>,
     },
     /// Retrieve legacy deploy with given hash.
     GetLegacyDeploy {
@@ -416,12 +414,9 @@ pub(crate) enum StorageRequest {
         /// None is returned when we don't have the block in the storage.
         responder: Responder<Option<BlockHeader>>,
     },
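Likewise, a hedged fragment for the caller side of the renamed speculative-execution effect (it assumes `effect_builder`, `execution_prestate` and a `transaction` in scope, and is not part of the diff):

```rust
// The transaction is now passed as `Box<Transaction>` rather than the old
// `Arc<Deploy>`; the result type is unchanged.
let result: Result<Option<ExecutionResultV2>, engine_state::Error> = effect_builder
    .speculatively_execute(execution_prestate, Box::new(transaction))
    .await;
```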
-    /// Retrieve deploy and its execution info.
-    GetDeployAndExecutionInfo {
-        /// Hash of deploy to be retrieved.
-        deploy_hash: DeployHash,
-        /// Responder to call with the results.
-        responder: Responder<Option<(DeployWithFinalizedApprovals, Option<DeployExecutionInfo>)>>,
+    GetTransactionAndExecutionInfo {
+        transaction_hash: TransactionHash,
+        responder: Responder<Option<(TransactionWithFinalizedApprovals, Option<ExecutionInfo>)>>,
     },
     /// Retrieve block and its signatures by its hash.
     GetSignedBlockByHash {
@@ -563,8 +558,14 @@ impl Display for StorageRequest {
             StorageRequest::PutTransaction { transaction, .. } => {
                 write!(formatter, "put {}", transaction)
             }
-            StorageRequest::GetDeploys { deploy_hashes, .. } => {
-                write!(formatter, "get {}", DisplayIter::new(deploy_hashes.iter()))
+            StorageRequest::GetTransactions {
+                transaction_hashes, ..
+            } => {
+                write!(
+                    formatter,
+                    "get {}",
+                    DisplayIter::new(transaction_hashes.iter())
+                )
             }
             StorageRequest::GetLegacyDeploy { deploy_hash, .. } => {
                 write!(formatter, "get legacy deploy {}", deploy_hash)
@@ -585,8 +586,14 @@ impl Display for StorageRequest {
                 write!(formatter, "get block execution results or chunk for {}", id)
             }
-            StorageRequest::GetDeployAndExecutionInfo { deploy_hash, .. } => {
-                write!(formatter, "get deploy and metadata for {}", deploy_hash)
+            StorageRequest::GetTransactionAndExecutionInfo {
+                transaction_hash, ..
+            } => {
+                write!(
+                    formatter,
+                    "get transaction and metadata for {}",
+                    transaction_hash
+                )
             }
             StorageRequest::GetFinalitySignature { id, .. } => {
                 write!(formatter, "get finality signature {}", id)
@@ -951,12 +958,12 @@ pub(crate) enum ContractRuntimeRequest {
         /// Responder to call with the result. Contains the hash of the stored trie.
         responder: Responder<Result<Digest, engine_state::Error>>,
     },
-    /// Execute deploys without committing results
-    SpeculativeDeployExecution {
-        /// Hash of a block on top of which to execute the deploy.
+    /// Execute transaction without committing results
+    SpeculativelyExecute {
+        /// Hash of a block on top of which to execute the transaction.
         execution_prestate: SpeculativeExecutionState,
-        /// Deploy to execute.
-        deploy: Arc<Deploy>,
+        /// Transaction to execute.
+        transaction: Box<Transaction>,
         /// Results
         responder: Responder<Result<Option<ExecutionResultV2>, engine_state::Error>>,
     },
@@ -1029,15 +1036,15 @@ impl Display for ContractRuntimeRequest {
             ContractRuntimeRequest::PutTrie { trie_bytes, .. } => {
                 write!(formatter, "trie: {:?}", trie_bytes)
             }
-            ContractRuntimeRequest::SpeculativeDeployExecution {
+            ContractRuntimeRequest::SpeculativelyExecute {
                 execution_prestate,
-                deploy,
+                transaction,
                 ..
} => { write!( formatter, "Execute {} on {}", - deploy.hash(), + transaction.hash(), execution_prestate.state_root_hash ) } diff --git a/node/src/types.rs b/node/src/types.rs index 75b4f81fb6..f449497e08 100644 --- a/node/src/types.rs +++ b/node/src/types.rs @@ -18,9 +18,12 @@ mod transaction; mod validator_matrix; mod value_or_chunk; +use std::fmt::Debug; + use rand::{CryptoRng, RngCore}; #[cfg(not(test))] use rand_chacha::ChaCha20Rng; +use thiserror::Error; pub use available_block_range::AvailableBlockRange; pub(crate) use block::{ @@ -41,9 +44,9 @@ pub use peers_map::PeersMap; pub use status_feed::{ChainspecInfo, GetStatusResult, StatusFeed}; pub(crate) use sync_leap::{GlobalStatesMetadata, SyncLeap, SyncLeapIdentifier}; pub(crate) use transaction::{ - DeployExecutionInfo, DeployHashWithApprovals, DeployOrTransferHash, - DeployWithFinalizedApprovals, FinalizedApprovals, FinalizedDeployApprovals, - FinalizedTransactionV1Approvals, LegacyDeploy, TransactionWithFinalizedApprovals, + DeployHashWithApprovals, DeployOrTransferHash, DeployWithFinalizedApprovals, ExecutionInfo, + FinalizedApprovals, FinalizedDeployApprovals, FinalizedTransactionV1Approvals, LegacyDeploy, + TransactionWithFinalizedApprovals, }; pub(crate) use validator_matrix::{EraValidatorWeights, SignatureWeight, ValidatorMatrix}; pub use value_or_chunk::{ @@ -62,3 +65,8 @@ pub type NodeRng = ChaCha20Rng; /// The RNG used throughout the node for testing. #[cfg(test)] pub type NodeRng = casper_types::testing::TestRng; + +/// The variants in the given types are expected to all be the same. +#[derive(Debug, Error)] +#[error("mismatch in variants: {0:?}")] +pub struct VariantMismatch(pub(super) Box); diff --git a/node/src/types/transaction.rs b/node/src/types/transaction.rs index 042e41eb57..effc8131dc 100644 --- a/node/src/types/transaction.rs +++ b/node/src/types/transaction.rs @@ -1,12 +1,14 @@ mod deploy; +mod execution_info; mod finalized_approvals; mod transaction_v1; mod transaction_with_finalized_approvals; pub(crate) use deploy::{ - DeployExecutionInfo, DeployHashWithApprovals, DeployOrTransferHash, - DeployWithFinalizedApprovals, FinalizedDeployApprovals, LegacyDeploy, + DeployHashWithApprovals, DeployOrTransferHash, DeployWithFinalizedApprovals, + FinalizedDeployApprovals, LegacyDeploy, }; +pub(crate) use execution_info::ExecutionInfo; pub(crate) use finalized_approvals::FinalizedApprovals; pub(crate) use transaction_v1::FinalizedTransactionV1Approvals; pub(crate) use transaction_with_finalized_approvals::TransactionWithFinalizedApprovals; diff --git a/node/src/types/transaction/deploy.rs b/node/src/types/transaction/deploy.rs index 9de79416ee..893b4eca38 100644 --- a/node/src/types/transaction/deploy.rs +++ b/node/src/types/transaction/deploy.rs @@ -1,11 +1,9 @@ -mod deploy_execution_info; mod deploy_hash_with_approvals; mod deploy_or_transfer_hash; mod deploy_with_finalized_approvals; mod finalized_deploy_approvals; mod legacy_deploy; -pub(crate) use deploy_execution_info::DeployExecutionInfo; pub(crate) use deploy_hash_with_approvals::DeployHashWithApprovals; pub(crate) use deploy_or_transfer_hash::DeployOrTransferHash; pub(crate) use deploy_with_finalized_approvals::DeployWithFinalizedApprovals; diff --git a/node/src/types/transaction/deploy/deploy_execution_info.rs b/node/src/types/transaction/execution_info.rs similarity index 93% rename from node/src/types/transaction/deploy/deploy_execution_info.rs rename to node/src/types/transaction/execution_info.rs index b8dfefd9ec..78d93f6102 100644 --- 
a/node/src/types/transaction/deploy/deploy_execution_info.rs +++ b/node/src/types/transaction/execution_info.rs @@ -7,7 +7,7 @@ use casper_types::{execution::ExecutionResult, BlockHash}; /// if known. #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, JsonSchema)] #[serde(deny_unknown_fields)] -pub struct DeployExecutionInfo { +pub struct ExecutionInfo { pub(crate) block_hash: BlockHash, pub(crate) block_height: u64, pub(crate) execution_result: Option, diff --git a/node/src/types/transaction/transaction_with_finalized_approvals.rs b/node/src/types/transaction/transaction_with_finalized_approvals.rs index 63202e5f24..b47934a204 100644 --- a/node/src/types/transaction/transaction_with_finalized_approvals.rs +++ b/node/src/types/transaction/transaction_with_finalized_approvals.rs @@ -73,4 +73,14 @@ impl TransactionWithFinalizedApprovals { } } } + + /// Extracts the original transaction by discarding the finalized approvals. + pub(crate) fn discard_finalized_approvals(self) -> Transaction { + match self { + TransactionWithFinalizedApprovals::Deploy { deploy, .. } => Transaction::Deploy(deploy), + TransactionWithFinalizedApprovals::V1 { transaction, .. } => { + Transaction::V1(transaction) + } + } + } } diff --git a/resources/test/rpc_schema.json b/resources/test/rpc_schema.json index 18651a4550..345e61dd81 100644 --- a/resources/test/rpc_schema.json +++ b/resources/test/rpc_schema.json @@ -26,7 +26,7 @@ "methods": [ { "name": "account_put_deploy", - "summary": "receives a Deploy to be executed by the network", + "summary": "receives a Deploy to be executed by the network (DEPRECATED: use `account_put_transaction` instead)", "params": [ { "name": "deploy", @@ -127,9 +127,138 @@ } ] }, + { + "name": "account_put_transaction", + "summary": "receives a Transaction to be executed by the network", + "params": [ + { + "name": "transaction", + "schema": { + "description": "The `Transaction`.", + "$ref": "#/components/schemas/Transaction" + }, + "required": true + } + ], + "result": { + "name": "account_put_transaction_result", + "schema": { + "description": "Result for \"account_put_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction_hash": { + "description": "The transaction hash.", + "$ref": "#/components/schemas/TransactionHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "account_put_transaction_example", + "params": [ + { + "name": "transaction", + "value": { + "V1": { + "hash": "d1cb08851052c110a0df51c8b7ba8f3cbe3e5487a4e012f7db0d32ef02fdbde8", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "pricing_mode": { + "GasPriceMultiplier": 1 + }, + "body_hash": "b4586835d0c02afdf0ed94d46d823cdf1f8d3fc0e29f8a0d6e7fdbc315c9ddd7", + "chain_name": "casper-example" + }, + "payment": null, + "body": { + "Native": { + "MintTransfer": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { 
+ "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "to", + { + "cl_type": { + "Option": { + "ByteArray": 32 + } + }, + "bytes": "012828282828282828282828282828282828282828282828282828282828282828", + "parsed": "2828282828282828282828282828282828282828282828282828282828282828" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "011fc821cda1dfe388354ffc698e6ec82a2fa552cd8d29c819971e8e39e091eccd8b0201d835f46c474c2e37b3451132b5b15712183f02bd6c327638769c04d50d" + } + ] + } + } + } + ], + "result": { + "name": "account_put_transaction_example_result", + "value": { + "api_version": "1.5.2", + "transaction_hash": { + "V1": "d1cb08851052c110a0df51c8b7ba8f3cbe3e5487a4e012f7db0d32ef02fdbde8" + } + } + } + } + ] + }, { "name": "info_get_deploy", - "summary": "returns a Deploy from the network", + "summary": "returns a Deploy from the network (DEPRECATED: use `info_get_transaction` instead)", "params": [ { "name": "deploy_hash", @@ -286,6 +415,191 @@ } ] }, + { + "name": "info_get_transaction", + "summary": "returns a Transaction from the network", + "params": [ + { + "name": "transaction_hash", + "schema": { + "description": "The transaction hash.", + "$ref": "#/components/schemas/TransactionHash" + }, + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": "Whether to return the transaction with the finalized approvals substituted. If `false` or omitted, returns the transaction with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_transaction_result", + "schema": { + "description": "Result for \"info_get_transaction\" RPC response.", + "type": "object", + "required": [ + "api_version", + "transaction" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "transaction": { + "description": "The transaction.", + "$ref": "#/components/schemas/Transaction" + }, + "block_hash": { + "$ref": "#/components/schemas/BlockHash" + }, + "block_height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "execution_result": { + "anyOf": [ + { + "$ref": "#/components/schemas/ExecutionResult" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_transaction_example", + "params": [ + { + "name": "transaction_hash", + "value": { + "V1": "d1cb08851052c110a0df51c8b7ba8f3cbe3e5487a4e012f7db0d32ef02fdbde8" + } + }, + { + "name": "finalized_approvals", + "value": true + } + ], + "result": { + "name": "info_get_transaction_example_result", + "value": { + "api_version": "1.5.2", + "transaction": { + "V1": { + "hash": "d1cb08851052c110a0df51c8b7ba8f3cbe3e5487a4e012f7db0d32ef02fdbde8", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "pricing_mode": { + "GasPriceMultiplier": 1 + }, + "body_hash": "b4586835d0c02afdf0ed94d46d823cdf1f8d3fc0e29f8a0d6e7fdbc315c9ddd7", + "chain_name": "casper-example" + }, + "payment": null, + "body": { + "Native": { + "MintTransfer": [ + [ + "source", + { + "cl_type": "URef", + "bytes": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a07", + "parsed": 
"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007" + } + ], + [ + "target", + { + "cl_type": "URef", + "bytes": "1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b00", + "parsed": "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000" + } + ], + [ + "amount", + { + "cl_type": "U512", + "bytes": "0500ac23fc06", + "parsed": "30000000000" + } + ], + [ + "to", + { + "cl_type": { + "Option": { + "ByteArray": 32 + } + }, + "bytes": "012828282828282828282828282828282828282828282828282828282828282828", + "parsed": "2828282828282828282828282828282828282828282828282828282828282828" + } + ], + [ + "id", + { + "cl_type": { + "Option": "U64" + }, + "bytes": "01e703000000000000", + "parsed": 999 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "011fc821cda1dfe388354ffc698e6ec82a2fa552cd8d29c819971e8e39e091eccd8b0201d835f46c474c2e37b3451132b5b15712183f02bd6c327638769c04d50d" + } + ] + } + }, + "block_hash": "24ec19bc9aee9726616271ccfb046e8a7ea6370a34f18d6fb54d7a8ae29bdb1b", + "block_height": 10, + "execution_result": { + "Version2": { + "Success": { + "effects": [ + { + "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": { + "AddUInt64": 8 + } + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Identity" + } + ], + "transfers": [ + "transfer-5959595959595959595959595959595959595959595959595959595959595959", + "transfer-8282828282828282828282828282828282828282828282828282828282828282" + ], + "cost": "123456" + } + } + } + } + } + } + ] + }, { "name": "state_get_account_info", "summary": "returns an Account from the network", @@ -2214,65 +2528,759 @@ "additionalProperties": false }, { - "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { + "type": "object", + "required": [ + "err", + "ok" + ], + "properties": { + "ok": { + "$ref": "#/components/schemas/CLType" + }, + "err": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "$ref": "#/components/schemas/CLType" + }, + "value": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + 
"$ref": "#/components/schemas/CLType" + }, + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" + }, + "maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + }, + { + "description": "Unspecified type.", + "type": "string", + "enum": [ + "Any" + ] + } + ] + }, + "DeployApproval": { + "description": "A struct containing a signature of a deploy hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "Transaction": { + "description": "A versioned wrapper for a transaction or deploy.", + "oneOf": [ + { + "description": "A deploy.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/Deploy" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction.", + "type": "object", + "required": [ + "V1" + ], + "properties": { + "V1": { + "$ref": "#/components/schemas/TransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1": { + "description": "A unit of work sent by a client to the network, which when executed can cause global state to be altered.", + "type": "object", + "required": [ + "approvals", + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/TransactionV1Hash" + }, + "header": { + "$ref": "#/components/schemas/TransactionV1Header" + }, + "payment": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "body": { + "$ref": "#/components/schemas/TransactionV1Kind" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TransactionV1Approval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "TransactionV1Hash": { + "description": "Hex-encoded TransactionV1 hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "TransactionV1Header": { + "description": "The header portion of a TransactionV1.", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + "pricing_mode", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/components/schemas/PublicKey" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "ttl": { + "$ref": "#/components/schemas/TimeDiff" + }, + "pricing_mode": { + "$ref": "#/components/schemas/PricingModeV1" + }, + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "PricingModeV1": { + "description": "Pricing mode of a TransactionV1.", + "oneOf": [ + { + "description": "Multiplies the gas used by the given amount.\n\nThis is the same behaviour as for the `Deploy::gas_price`.", + "type": "object", + "required": [ + "GasPriceMultiplier" + ], + "properties": { + "GasPriceMultiplier": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "First-in-first-out handling of 
transactions, i.e. pricing mode is irrelevant to ordering.", + "type": "string", + "enum": [ + "Fixed" + ] + }, + { + "description": "The payment for this transaction was previously reserved.", + "type": "string", + "enum": [ + "Reserved" + ] + } + ] + }, + "TransactionV1Kind": { + "description": "The high-level kind of a given TransactionV1.", + "oneOf": [ + { + "description": "A transaction targeting native functionality.", + "type": "object", + "required": [ + "Native" + ], + "properties": { + "Native": { + "$ref": "#/components/schemas/NativeTransactionV1" + } + }, + "additionalProperties": false + }, + { + "description": "A transaction with userland (i.e. not native) functionality.", + "type": "object", + "required": [ + "Userland" + ], + "properties": { + "Userland": { + "$ref": "#/components/schemas/UserlandTransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "NativeTransactionV1": { + "description": "A TransactionV1 targeting native functionality.", + "oneOf": [ + { + "description": "Calls the `transfer` entry point of the mint to transfer `Motes` from a source purse to a target purse.", + "type": "object", + "required": [ + "MintTransfer" + ], + "properties": { + "MintTransfer": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "A transaction targeting the auction.", + "type": "object", + "required": [ + "Auction" + ], + "properties": { + "Auction": { + "$ref": "#/components/schemas/AuctionTransactionV1" + } + }, + "additionalProperties": false + }, + { + "description": "A transaction reserving a future execution.", + "type": "object", + "required": [ + "Reservation" + ], + "properties": { + "Reservation": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + } + ] + }, + "AuctionTransactionV1": { + "description": "A TransactionV1 targeting the auction.", + "oneOf": [ + { + "description": "Calls the `add_bid` entry point to create or top off a bid purse.", + "type": "object", + "required": [ + "AddBid" + ], + "properties": { + "AddBid": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `withdraw_bid` entry point to decrease a stake.", + "type": "object", + "required": [ + "WithdrawBid" + ], + "properties": { + "WithdrawBid": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `delegate` entry point to add a new delegator or increase an existing delegator's stake.", + "type": "object", + "required": [ + "Delegate" + ], + "properties": { + "Delegate": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `undelegate` entry point to reduce a delegator's stake or remove the delegator if the remaining stake is 0.", + "type": "object", + "required": [ + "Undelegate" + ], + "properties": { + "Undelegate": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `redelegate` entry point to reduce a delegator's stake or remove the delegator if the remaining stake is 0, and after the unbonding delay, automatically delegate to a new validator.", + "type": "object", + "required": [ + "Redelegate" + ], + "properties": { + "Redelegate": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `get_era_validators` entry point to 
provide the validators for the current era and configured number of future eras.", + "type": "object", + "required": [ + "GetEraValidators" + ], + "properties": { + "GetEraValidators": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + }, + { + "description": "Calls the `read_era_id` entry point to provide the current era ID.", + "type": "object", + "required": [ + "ReadEraId" + ], + "properties": { + "ReadEraId": { + "$ref": "#/components/schemas/RuntimeArgs" + } + }, + "additionalProperties": false + } + ] + }, + "UserlandTransactionV1": { + "description": "A TransactionV1 with userland (i.e. not native) functionality.", + "oneOf": [ + { + "description": "A general purpose transaction.", + "type": "object", + "required": [ + "Standard" + ], + "properties": { + "Standard": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Raw Wasm module bytes with 'call' exported as an entrypoint.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "An installer or upgrader for a stored contract.", + "type": "object", + "required": [ + "InstallerUpgrader" + ], + "properties": { + "InstallerUpgrader": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "contract_package_id": { + "description": "If `Some`, this is an upgrade for the given contract, otherwise it is an installer.", + "anyOf": [ + { + "$ref": "#/components/schemas/PackageIdentifier" + }, + { + "type": "null" + } + ] + }, + "module_bytes": { + "description": "Raw Wasm module bytes with 'call' exported as an entrypoint.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A transaction targeting a stored contract.", + "type": "object", + "required": [ + "DirectCall" + ], + "properties": { + "DirectCall": { + "$ref": "#/components/schemas/DirectCallV1" + } + }, + "additionalProperties": false + }, + { + "description": "A transaction which doesn't modify global state.", + "type": "object", + "required": [ + "Noop" + ], + "properties": { + "Noop": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Raw Wasm module bytes with 'call' exported as an entrypoint.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A transaction which doesn't call stored contracts.", + "type": "object", + "required": [ + "Closed" + ], + "properties": { + "Closed": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Raw Wasm module bytes with 'call' exported as an entrypoint.", + "allOf": [ + { + "$ref": "#/components/schemas/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": 
"#/components/schemas/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + }, + "PackageIdentifier": { + "description": "Identifier for a package.", + "oneOf": [ + { + "description": "The stored contract package within the deploy item is identified by its hash.", "type": "object", "required": [ - "ByteArray" + "Hash" ], "properties": { - "ByteArray": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 + "Hash": { + "type": "object", + "required": [ + "package_hash" + ], + "properties": { + "package_hash": { + "description": "Hash of the contract package.", + "allOf": [ + { + "$ref": "#/components/schemas/PackageHash" + } + ] + }, + "version": { + "description": "The version specified in the deploy item.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + } + } } }, "additionalProperties": false }, { - "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "description": "The stored contract package within the deploy item is identified by name.", "type": "object", "required": [ - "Result" + "Name" ], "properties": { - "Result": { + "Name": { "type": "object", "required": [ - "err", - "ok" + "name" ], "properties": { - "ok": { - "$ref": "#/components/schemas/CLType" + "name": { + "description": "Name of the contract package.", + "type": "string" }, - "err": { - "$ref": "#/components/schemas/CLType" + "version": { + "description": "The version specified in the deploy item.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 } - }, - "additionalProperties": false + } } }, "additionalProperties": false - }, + } + ] + }, + "PackageHash": { + "description": "The hex-encoded address of the Package.", + "type": "string" + }, + "DirectCallV1": { + "description": "A TransactionV1 targeting a stored contract.", + "oneOf": [ { - "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "description": "Stored contract referenced by its hash.", "type": "object", "required": [ - "Map" + "StoredContractByHash" ], "properties": { - "Map": { + "StoredContractByHash": { "type": "object", "required": [ - "key", - "value" + "args", + "entry_point", + "hash" ], "properties": { - "key": { - "$ref": "#/components/schemas/CLType" + "hash": { + "description": "Contract hash.", + "allOf": [ + { + "$ref": "#/components/schemas/AddressableEntityHash" + } + ] }, - "value": { - "$ref": "#/components/schemas/CLType" + "entry_point": { + "description": "Name of the entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] } }, "additionalProperties": false @@ -2281,70 +3289,146 @@ "additionalProperties": false }, { - "description": "1-ary tuple of a `CLType`.", + "description": "Stored contract referenced by the name of a named key in the signer's account context.", "type": "object", "required": [ - "Tuple1" + "StoredContractByName" ], "properties": { - "Tuple1": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CLType" + "StoredContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Name of the named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of the entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + 
"allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } }, - "maxItems": 1, - "minItems": 1 + "additionalProperties": false } }, "additionalProperties": false }, { - "description": "2-ary tuple of `CLType`s.", + "description": "Stored versioned contract referenced by its hash.", "type": "object", "required": [ - "Tuple2" + "StoredVersionedContractByHash" ], "properties": { - "Tuple2": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CLType" + "StoredVersionedContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Contract package hash.", + "allOf": [ + { + "$ref": "#/components/schemas/PackageHash" + } + ] + }, + "version": { + "description": "Version of the contract to call; defaults to highest enabled version if unspecified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Name of the entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } }, - "maxItems": 2, - "minItems": 2 + "additionalProperties": false } }, "additionalProperties": false }, { - "description": "3-ary tuple of `CLType`s.", + "description": "Stored versioned contract referenced by the name of a named key in the signer's account context.", "type": "object", "required": [ - "Tuple3" + "StoredVersionedContractByName" ], "properties": { - "Tuple3": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CLType" + "StoredVersionedContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Name of the named key.", + "type": "string" + }, + "version": { + "description": "Version of the contract to call; defaults to highest enabled version if unspecified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Name of the entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" + } + ] + } }, - "maxItems": 3, - "minItems": 3 + "additionalProperties": false } }, "additionalProperties": false - }, - { - "description": "Unspecified type.", - "type": "string", - "enum": [ - "Any" - ] } ] }, - "DeployApproval": { - "description": "A struct containing a signature of a deploy hash and the public key of the signer.", + "AddressableEntityHash": { + "description": "The hex-encoded address of the addressable entity.", + "type": "string" + }, + "TransactionV1Approval": { + "description": "A struct containing a signature of a transaction hash and the public key of the signer.", "type": "object", "required": [ "signature", @@ -2360,9 +3444,36 @@ }, "additionalProperties": false }, - "Signature": { - "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", - "type": "string" + "TransactionHash": { + "description": "A versioned wrapper for a transaction hash or deploy hash.", + "oneOf": [ + { + "description": "A deploy hash.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction hash.", + "type": "object", + "required": [ + "V1" + ], + "properties": { + "V1": { + "$ref": 
"#/components/schemas/TransactionV1Hash" + } + }, + "additionalProperties": false + } + ] }, "BlockHash": { "description": "Hex-encoded cryptographic hash of a block.", @@ -4155,10 +5266,6 @@ } } }, - "Bytes": { - "description": "Hex-encoded bytes.", - "type": "string" - }, "Contract": { "description": "Methods and type signatures supported by a contract.", "type": "object", @@ -4498,10 +5605,6 @@ } } }, - "PackageHash": { - "description": "The hex-encoded address of the Package.", - "type": "string" - }, "ByteCodeHash": { "description": "The hash address of the contract wasm", "type": "string" @@ -4619,10 +5722,6 @@ } } }, - "AddressableEntityHash": { - "description": "The hex-encoded address of the addressable entity.", - "type": "string" - }, "PackageStatus": { "description": "A enum to determine the lock status of the package.", "oneOf": [ diff --git a/types/src/block.rs b/types/src/block.rs index 36ebac501f..a0f3d3c35a 100644 --- a/types/src/block.rs +++ b/types/src/block.rs @@ -213,7 +213,7 @@ impl Block { Ok(block) } - /// Clones the header, put it in the versioning enum, and returns it. + /// Clones the header, put it in the versioning enum, and returns it. pub fn clone_header(&self) -> BlockHeader { match self { Block::V1(v1) => BlockHeader::V1(v1.header().clone()), @@ -317,14 +317,6 @@ impl Block { } } - /// Returns the deploy hashes within the block. - pub fn deploy_hashes(&self) -> &[DeployHash] { - match self { - Block::V1(v1) => v1.body.deploy_hashes(), - Block::V2(v2) => v2.body.deploy_hashes(), - } - } - /// Returns the era ID in which this block was created. pub fn era_id(&self) -> EraId { match self { @@ -333,7 +325,7 @@ impl Block { } } - /// Clones the era end, put it in the versioning enum, and returns it. + /// Clones the era end, put it in the versioning enum, and returns it. pub fn clone_era_end(&self) -> Option { match self { Block::V1(v1) => v1.header().era_end().cloned().map(EraEnd::V1), @@ -357,6 +349,14 @@ impl Block { } } + /// Returns the deploy hashes within the block. + pub fn deploy_hashes(&self) -> &[DeployHash] { + match self { + Block::V1(v1) => v1.body.deploy_hashes(), + Block::V2(v2) => v2.body.deploy_hashes(), + } + } + /// Returns the transfer hashes within the block. 
diff --git a/types/src/execution/execution_result.rs b/types/src/execution/execution_result.rs
index 5b57ea0280..478e00ac6d 100644
--- a/types/src/execution/execution_result.rs
+++ b/types/src/execution/execution_result.rs
@@ -26,6 +26,12 @@ pub enum ExecutionResult {
     V2(ExecutionResultV2),
 }
 
+impl From<ExecutionResultV1> for ExecutionResult {
+    fn from(value: ExecutionResultV1) -> Self {
+        ExecutionResult::V1(value)
+    }
+}
+
 impl From<ExecutionResultV2> for ExecutionResult {
     fn from(value: ExecutionResultV2) -> Self {
         ExecutionResult::V2(value)
diff --git a/types/src/transaction.rs b/types/src/transaction.rs
index dcf14d42e1..ba5239d992 100644
--- a/types/src/transaction.rs
+++ b/types/src/transaction.rs
@@ -12,16 +12,24 @@ use core::fmt::{self, Debug, Display, Formatter};
 #[cfg(feature = "datasize")]
 use datasize::DataSize;
 #[cfg(feature = "json-schema")]
+use once_cell::sync::Lazy;
+#[cfg(any(all(feature = "std", feature = "testing"), test))]
+use rand::Rng;
+#[cfg(feature = "json-schema")]
 use schemars::JsonSchema;
 #[cfg(any(feature = "std", test))]
 use serde::{Deserialize, Serialize};
 use tracing::error;
 
+#[cfg(any(all(feature = "std", feature = "testing"), test))]
+use crate::testing::TestRng;
 use crate::{
     account::AccountHash,
     bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH},
     Digest, PublicKey, Timestamp,
 };
+#[cfg(feature = "json-schema")]
+use crate::{account::ACCOUNT_HASH_LENGTH, SecretKey, TimeDiff, URef};
 #[cfg(any(feature = "std", test))]
 use account_and_secret_key::AccountAndSecretKey;
 pub use deploy::{
@@ -49,6 +57,31 @@ pub use transaction_v1::{TransactionV1Builder, TransactionV1BuilderError};
 const DEPLOY_TAG: u8 = 0;
 const V1_TAG: u8 = 1;
 
+#[cfg(feature = "json-schema")]
+pub(super) static TRANSACTION: Lazy<Transaction> = Lazy::new(|| {
+    let secret_key = SecretKey::example();
+    let source = URef::from_formatted_str(
+        "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007",
+    )
+    .unwrap();
+    let target = URef::from_formatted_str(
+        "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000",
+    )
+    .unwrap();
+    let to = Some(AccountHash::new([40; ACCOUNT_HASH_LENGTH]));
+    let id = Some(999);
+
+    Transaction::V1(TransactionV1::build(
+        *Timestamp::example(),
+        TimeDiff::from_seconds(3_600),
+        PricingModeV1::GasPriceMultiplier(1),
+        String::from("casper-example"),
+        None,
+        TransactionV1Kind::new_transfer(source, target, 30_000_000_000_u64, to, id).unwrap(),
+        AccountAndSecretKey::SecretKey(secret_key),
+    ))
+});
+
 /// A versioned wrapper for a transaction or deploy.
 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
 #[cfg_attr(
@@ -136,6 +169,23 @@ impl Transaction {
             .collect(),
         }
     }
+
+    // This method is not intended to be used by third party crates.
+    #[doc(hidden)]
+    #[cfg(feature = "json-schema")]
+    pub fn example() -> &'static Self {
+        &TRANSACTION
+    }
+
+    /// Returns a random, valid but possibly expired transaction.
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Transaction::Deploy(Deploy::random_valid_native_transfer(rng)) + } else { + Transaction::V1(TransactionV1::random(rng)) + } + } } impl From for Transaction { diff --git a/types/src/transaction/transaction_v1.rs b/types/src/transaction/transaction_v1.rs index 796a2813d2..09f81edb1c 100644 --- a/types/src/transaction/transaction_v1.rs +++ b/types/src/transaction/transaction_v1.rs @@ -24,8 +24,6 @@ use core::{ #[cfg(feature = "datasize")] use datasize::DataSize; -#[cfg(feature = "json-schema")] -use once_cell::sync::Lazy; #[cfg(any(feature = "once_cell", test))] use once_cell::sync::OnceCell; #[cfg(feature = "json-schema")] @@ -36,12 +34,8 @@ use tracing::debug; #[cfg(any(feature = "std", test))] use super::AccountAndSecretKey; -#[cfg(feature = "json-schema")] -use crate::account::{AccountHash, ACCOUNT_HASH_LENGTH}; #[cfg(any(all(feature = "std", feature = "testing"), test))] use crate::testing::TestRng; -#[cfg(feature = "json-schema")] -use crate::URef; use crate::{ bytesrepr::{self, FromBytes, ToBytes}, crypto, CLTyped, CLValueError, Digest, DisplayIter, PublicKey, RuntimeArgs, SecretKey, @@ -68,31 +62,6 @@ pub use transaction_v1_header::TransactionV1Header; pub use transaction_v1_kind::TransactionV1Kind; pub use userland_transaction_v1::UserlandTransactionV1; -#[cfg(feature = "json-schema")] -static TRANSACTION: Lazy = Lazy::new(|| { - let secret_key = SecretKey::example(); - let source = URef::from_formatted_str( - "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", - ) - .unwrap(); - let target = URef::from_formatted_str( - "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", - ) - .unwrap(); - let to = Some(AccountHash::new([40; ACCOUNT_HASH_LENGTH])); - let id = Some(999); - - TransactionV1::build( - *Timestamp::example(), - TimeDiff::from_seconds(3_600), - PricingModeV1::GasPriceMultiplier(1), - String::from("casper-example"), - None, - TransactionV1Kind::new_transfer(source, target, 30_000_000_000_u64, to, id).unwrap(), - AccountAndSecretKey::SecretKey(secret_key), - ) -}); - /// A unit of work sent by a client to the network, which when executed can cause global state to /// be altered. /// @@ -130,7 +99,7 @@ pub struct TransactionV1 { impl TransactionV1 { /// Called by the `TransactionBuilder` to construct a new `TransactionV1`. #[cfg(any(feature = "std", test))] - fn build( + pub(super) fn build( timestamp: Timestamp, ttl: TimeDiff, pricing_mode: PricingModeV1, @@ -413,13 +382,6 @@ impl TransactionV1 { TestTransactionV1Builder::new(rng).build() } - // This method is not intended to be used by third party crates. - #[doc(hidden)] - #[cfg(feature = "json-schema")] - pub fn example() -> &'static Self { - &TRANSACTION - } - /// Turns `self` into an invalid transaction by clearing the `chain_name`, invalidating the /// transaction header hash. #[cfg(any(all(feature = "std", feature = "testing"), test))]