From b184f880973abec33e909a8be1e8360f7ba00e2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Tue, 9 May 2023 11:32:18 +0200 Subject: [PATCH 01/41] Cherry-picked wasmi upgrade to 0.13.2 This wasmi version deprecates `MemoryInstance::get`, which always allocates and copies data back and forth between the VM's linear memory and the heap on the host. With this commit the host no longer allocates: data is deserialized straight from linear memory, and copies are made only when absolutely necessary. (A standalone sketch of the new access pattern follows PATCH 02's message below.) This commit also updates the casper-wasm-utils crate. --- Cargo.lock | 92 +++++------- execution_engine/Cargo.toml | 6 +- execution_engine/src/core/runtime/args.rs | 134 +++++++++--------- .../src/core/runtime/externals.rs | 28 ++-- execution_engine/src/core/runtime/mod.rs | 125 ++++++++++------ 5 files changed, 203 insertions(+), 182 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de33d13ada..491c3c9ce8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -412,7 +412,7 @@ dependencies = [ "humantime", "lmdb-rkv", "log", - "num-rational 0.4.1", + "num-rational", "num-traits", "once_cell", "rand 0.8.5", @@ -441,7 +441,7 @@ dependencies = [ "gh-1470-regression", "gh-1470-regression-call", "log", - "num-rational 0.4.1", + "num-rational", "num-traits", "once_cell", "parity-wasm 0.41.0", @@ -480,11 +480,11 @@ dependencies = [ "log", "num", "num-derive", - "num-rational 0.4.1", + "num-rational", "num-traits", "num_cpus", "once_cell", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", "proptest", "rand 0.8.5", "rand_chacha 0.3.1", @@ -587,7 +587,7 @@ dependencies = [ "log", "num", "num-derive", - "num-rational 0.4.1", + "num-rational", "num-traits", "num_cpus", "once_cell", @@ -660,7 +660,7 @@ dependencies = [ "num", "num-derive", "num-integer", - "num-rational 0.4.1", + "num-rational", "num-traits", "once_cell", "openssl", @@ -710,13 +710,13 @@ dependencies = [ [[package]] name = "casper-wasm-utils" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9c4208106e8a95a83ab3cb5f4e800114bfc101df9e7cb8c2160c7e298c6397" +checksum = "b49e4ef1382d48c312809fe8f09d0c7beb434a74f5026c5f12efe384df51ca42" dependencies = [ "byteorder", "log", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", ] [[package]] @@ -1679,7 +1679,7 @@ version = "3.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a62bb1df8b45ecb7ffa78dca1c17a438fb193eb083db0b1b494d2a61bcb5096a" dependencies = [ - "num-bigint 0.4.3", + "num-bigint", "num-traits", "proc-macro2 1.0.56", "quote 1.0.26", @@ -2774,12 +2774,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - [[package]] name = "memory_units" version = "0.4.0" @@ -2955,22 +2949,11 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" dependencies = [ - "num-bigint 0.4.3", + "num-bigint", "num-complex", "num-integer", "num-iter", - "num-rational 0.4.1", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", + "num-rational", "num-traits", ] @@ -3026,18 +3009,6
@@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint 0.2.6", - "num-integer", - "num-traits", -] - [[package]] name = "num-rational" version = "0.4.1" @@ -3045,7 +3016,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", - "num-bigint 0.4.3", + "num-bigint", "num-integer", "num-traits", "serde", @@ -3197,9 +3168,9 @@ checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" [[package]] name = "parity-wasm" -version = "0.42.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" +checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking_lot" @@ -5454,26 +5425,35 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.9.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" +checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" dependencies = [ - "downcast-rs", - "libc", - "memory_units 0.3.0", - "num-rational 0.2.4", - "num-traits", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", "wasmi-validation", + "wasmi_core", ] [[package]] name = "wasmi-validation" -version = "0.4.1" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" +dependencies = [ + "parity-wasm 0.45.0", +] + +[[package]] +name = "wasmi_core" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" +checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ - "parity-wasm 0.42.2", + "downcast-rs", + "libm", + "memory_units", + "num-rational", + "num-traits", ] [[package]] @@ -5521,7 +5501,7 @@ checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" dependencies = [ "cfg-if 0.1.10", "libc", - "memory_units 0.4.0", + "memory_units", "winapi", ] diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index 90dc2afd7e..686adc1b4e 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml @@ -16,7 +16,7 @@ base16 = "0.2.1" bincode = "1.3.1" casper-hashing = { version = "2.0.0", path = "../hashing" } casper-types = { version = "3.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } -casper-wasm-utils = "1.0.0" +casper-wasm-utils = "2.0.0" datasize = "0.2.4" either = "1.8.1" hex_fmt = "0.3.0" @@ -34,7 +34,7 @@ num-rational = { version = "0.4.0", features = ["serde"] } num-traits = "0.2.10" num_cpus = "1" once_cell = "1.5.2" -parity-wasm = { version = "0.42", default-features = false } +parity-wasm = { version = "0.45.0", default-features = false } proptest = { version = "1.0.0", optional = true } rand = "0.8.3" rand_chacha = "0.3.0" @@ -47,7 +47,7 @@ thiserror = "1.0.18" tracing = "0.1.18" uint = "0.9.0" uuid = { version = "0.8.1", features = ["serde", "v4"] } -wasmi = "0.9.1" +wasmi = "0.13.2" [dev-dependencies] assert_matches = 
"1.3.0" diff --git a/execution_engine/src/core/runtime/args.rs b/execution_engine/src/core/runtime/args.rs index 988890adb9..17af96a8c0 100644 --- a/execution_engine/src/core/runtime/args.rs +++ b/execution_engine/src/core/runtime/args.rs @@ -1,4 +1,4 @@ -use wasmi::{FromRuntimeValue, RuntimeArgs, Trap}; +use wasmi::{FromValue, RuntimeArgs, Trap}; pub(crate) trait Args where @@ -9,7 +9,7 @@ where impl Args for (T1,) where - T1: FromRuntimeValue + Sized, + T1: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -19,8 +19,8 @@ where impl Args for (T1, T2) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -31,9 +31,9 @@ where impl Args for (T1, T2, T3) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -45,10 +45,10 @@ where impl Args for (T1, T2, T3, T4) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -61,11 +61,11 @@ where impl Args for (T1, T2, T3, T4, T5) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -79,12 +79,12 @@ where impl Args for (T1, T2, T3, T4, T5, T6) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -99,13 +99,13 @@ where impl Args for (T1, T2, T3, T4, T5, T6, T7) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -121,14 +121,14 @@ where impl Args for (T1, T2, T3, T4, T5, T6, T7, T8) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = 
args.nth_checked(0)?; @@ -145,15 +145,15 @@ where impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -171,16 +171,16 @@ where impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, - T10: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, + T10: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; @@ -200,17 +200,17 @@ where impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, - T10: FromRuntimeValue + Sized, - T11: FromRuntimeValue + Sized, + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, + T10: FromValue + Sized, + T11: FromValue + Sized, { fn parse(args: RuntimeArgs) -> Result { let a0: T1 = args.nth_checked(0)?; diff --git a/execution_engine/src/core/runtime/externals.rs b/execution_engine/src/core/runtime/externals.rs index 570246502e..369883fa6a 100644 --- a/execution_engine/src/core/runtime/externals.rs +++ b/execution_engine/src/core/runtime/externals.rs @@ -320,15 +320,15 @@ where )?; let account_hash: AccountHash = { let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let amount: U512 = { let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let id: Option = { let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let ret = match self.transfer_to_account(account_hash, amount, id)? { @@ -382,19 +382,19 @@ where )?; let source_purse = { let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? 
}; let account_hash: AccountHash = { let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let amount: U512 = { let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let id: Option = { let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let ret = match self.transfer_from_purse_to_account( source_purse, @@ -695,13 +695,13 @@ where self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; let args_bytes: Vec = { let args_size: u32 = args_size; - self.bytes_from_mem(args_ptr, args_size as usize)? + self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec() }; let ret = self.call_contract_host_buffer( contract_hash, &entry_point_name, - args_bytes, + &args_bytes, result_size_ptr, )?; Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) @@ -751,14 +751,14 @@ where self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; let args_bytes: Vec = { let args_size: u32 = args_size; - self.bytes_from_mem(args_ptr, args_size as usize)? + self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec() }; let ret = self.call_versioned_contract_host_buffer( contract_package_hash, contract_version, entry_point_name, - args_bytes, + &args_bytes, result_size_ptr, )?; Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) @@ -882,8 +882,10 @@ where &host_function_costs.blake2b, [in_ptr, in_size, out_ptr, out_size], )?; - let input: Vec = self.bytes_from_mem(in_ptr, in_size as usize)?; - let digest = crypto::blake2b(input); + let digest = + self.checked_memory_slice(in_ptr as usize, in_size as usize, |input| { + crypto::blake2b(input) + })?; let result = if digest.len() != out_size as usize { Err(ApiError::BufferTooSmall) diff --git a/execution_engine/src/core/runtime/mod.rs b/execution_engine/src/core/runtime/mod.rs index 3486ed88c0..f9c1edc655 100644 --- a/execution_engine/src/core/runtime/mod.rs +++ b/execution_engine/src/core/runtime/mod.rs @@ -18,7 +18,7 @@ use std::{ use parity_wasm::elements::Module; use tracing::error; -use wasmi::{MemoryRef, Trap, TrapKind}; +use wasmi::{MemoryRef, Trap, TrapCode}; use casper_types::{ account::{Account, AccountHash, ActionType, Weight}, @@ -190,37 +190,76 @@ where self.context.charge_system_contract_call(amount) } + fn checked_memory_slice( + &self, + offset: usize, + size: usize, + func: impl FnOnce(&[u8]) -> Ret, + ) -> Result { + // This is mostly copied from a private function `MemoryInstance::checked_memory_region` + // that calls a user-defined function with a validated slice of memory. This allows + // usage patterns that do not involve copying data onto the heap first, i.e. + // deserializing values directly from linear memory. + // NOTE: Depending on the VM backend used in the future, this may change, as not all VMs + // may support direct memory access. + self.try_get_memory()?
+ .with_direct_access(|buffer| { + let end = offset.checked_add(size).ok_or_else(|| { + wasmi::Error::Memory(format!( + "trying to access memory block of size {} from offset {}", + size, offset + )) + })?; + + if end > buffer.len() { + return Err(wasmi::Error::Memory(format!( + "trying to access region [{}..{}] in memory [0..{}]", + offset, + end, + buffer.len(), + ))); + } + + Ok(func(&buffer[offset..end])) + }) + .map_err(Into::into) + } + /// Returns bytes from the WASM memory instance. + #[inline] fn bytes_from_mem(&self, ptr: u32, size: usize) -> Result, Error> { - self.try_get_memory()?.get(ptr, size).map_err(Into::into) + self.checked_memory_slice(ptr as usize, size, |data| data.to_vec()) } /// Returns a deserialized type from the WASM memory instance. + #[inline] fn t_from_mem(&self, ptr: u32, size: u32) -> Result { - let bytes = self.bytes_from_mem(ptr, size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) + let result = self.checked_memory_slice(ptr as usize, size as usize, |data| { + bytesrepr::deserialize_from_slice(data) + })?; + Ok(result?) } /// Reads key (defined as `key_ptr` and `key_size` tuple) from Wasm memory. + #[inline] fn key_from_mem(&mut self, key_ptr: u32, key_size: u32) -> Result { - let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) + self.t_from_mem(key_ptr, key_size) } /// Reads `CLValue` (defined as `cl_value_ptr` and `cl_value_size` tuple) from Wasm memory. + #[inline] fn cl_value_from_mem( &mut self, cl_value_ptr: u32, cl_value_size: u32, ) -> Result { - let bytes = self.bytes_from_mem(cl_value_ptr, cl_value_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) + self.t_from_mem(cl_value_ptr, cl_value_size) } /// Returns a deserialized string from the WASM memory instance. + #[inline] fn string_from_mem(&self, ptr: u32, size: u32) -> Result { - let bytes = self.bytes_from_mem(ptr, size as usize)?; - bytesrepr::deserialize(bytes).map_err(|e| Error::BytesRepr(e).into()) + self.t_from_mem(ptr, size).map_err(Trap::from) } fn get_module_from_entry_points( @@ -235,8 +274,7 @@ where #[allow(clippy::wrong_self_convention)] fn is_valid_uref(&self, uref_ptr: u32, uref_size: u32) -> Result { - let bytes = self.bytes_from_mem(uref_ptr, uref_size as usize)?; - let uref: URef = bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)?; + let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; Ok(self.context.validate_uref(&uref).is_ok()) } @@ -444,18 +482,15 @@ where /// type is `Trap`, indicating that this function will always kill the current Wasm instance. fn ret(&mut self, value_ptr: u32, value_size: usize) -> Trap { self.host_buffer = None; - let memory = match self.try_get_memory() { - Ok(memory) => memory, - Err(error) => return Trap::from(error), - }; - let mem_get = memory - .get(value_ptr, value_size) - .map_err(|e| Error::Interpreter(e.into())); + + let mem_get = + self.checked_memory_slice(value_ptr as usize, value_size, |data| data.to_vec()); + match mem_get { Ok(buf) => { // Set the result field in the runtime and return the proper element of the `Error` // enum indicating that the reason for exiting the module was a call to ret. 
- self.host_buffer = bytesrepr::deserialize(buf).ok(); + self.host_buffer = bytesrepr::deserialize_from_slice(buf).ok(); let urefs = match &self.host_buffer { Some(buf) => utils::extract_urefs(buf), @@ -1416,14 +1451,14 @@ where &mut self, contract_hash: ContractHash, entry_point_name: &str, - args_bytes: Vec, + args_bytes: &[u8], result_size_ptr: u32, ) -> Result, Error> { // Exit early if the host buffer is already occupied if let Err(err) = self.check_host_buffer() { return Ok(Err(err)); } - let args: RuntimeArgs = bytesrepr::deserialize(args_bytes)?; + let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?; let result = self.call_contract(contract_hash, entry_point_name, args)?; self.manage_call_contract_host_buffer(result_size_ptr, result) } @@ -1433,14 +1468,14 @@ where contract_package_hash: ContractPackageHash, contract_version: Option, entry_point_name: String, - args_bytes: Vec, + args_bytes: &[u8], result_size_ptr: u32, ) -> Result, Error> { // Exit early if the host buffer is already occupied if let Err(err) = self.check_host_buffer() { return Ok(Err(err)); } - let args: RuntimeArgs = bytesrepr::deserialize(args_bytes)?; + let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?; let result = self.call_versioned_contract( contract_package_hash, contract_version, @@ -1912,7 +1947,7 @@ where let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; // Account hash deserialized let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; + bytesrepr::deserialize_from_slice(source_serialized).map_err(Error::BytesRepr)?; source }; let weight = Weight::new(weight_value); @@ -1939,7 +1974,7 @@ where let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; // Account hash deserialized let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; + bytesrepr::deserialize_from_slice(source_serialized).map_err(Error::BytesRepr)?; source }; match self.context.remove_associated_key(account_hash) { @@ -1960,7 +1995,7 @@ where let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; // Account hash deserialized let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; + bytesrepr::deserialize_from_slice(source_serialized).map_err(Error::BytesRepr)?; source }; let weight = Weight::new(weight_value); @@ -1991,7 +2026,7 @@ where Err(e) => Err(e.into()), } } - Err(_) => Err(Trap::new(TrapKind::Unreachable)), + Err(_) => Err(Trap::Code(TrapCode::Unreachable)), } } @@ -2280,22 +2315,22 @@ where ) -> Result, Error> { let source: URef = { let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let target: URef = { let bytes = self.bytes_from_mem(target_ptr, target_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let amount: U512 = { let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? }; let id: Option = { let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? + bytesrepr::deserialize_from_slice(bytes).map_err(Error::BytesRepr)? 
}; self.context.validate_uref(&source)?; @@ -2333,7 +2368,7 @@ where let purse: URef = { let bytes = self.bytes_from_mem(purse_ptr, purse_size)?; - match bytesrepr::deserialize(bytes) { + match bytesrepr::deserialize_from_slice(bytes) { Ok(purse) => purse, Err(error) => return Ok(Err(error.into())), } }; @@ -2744,13 +2779,13 @@ where } let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; - let dictionary_item_key_bytes = self.bytes_from_mem( - dictionary_item_key_bytes_ptr, + let dictionary_item_key = self.checked_memory_slice( + dictionary_item_key_bytes_ptr as usize, dictionary_item_key_bytes_size as usize, + |utf8_bytes| std::str::from_utf8(utf8_bytes).map(ToOwned::to_owned), )?; - let dictionary_item_key = if let Ok(item_key) = String::from_utf8(dictionary_item_key_bytes) - { + let dictionary_item_key = if let Ok(item_key) = dictionary_item_key { item_key } else { return Ok(Err(ApiError::InvalidDictionaryItemKey)); }; @@ -2824,12 +2859,16 @@ where value_size: u32, ) -> Result, Trap> { let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; - let dictionary_item_key_bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - if dictionary_item_key_bytes.len() > DICTIONARY_ITEM_KEY_MAX_LENGTH { - return Ok(Err(ApiError::DictionaryItemKeyExceedsLength)); - } - let dictionary_item_key = if let Ok(item_key) = String::from_utf8(dictionary_item_key_bytes) - { + let dictionary_item_key_bytes = { + if (key_size as usize) > DICTIONARY_ITEM_KEY_MAX_LENGTH { + return Ok(Err(ApiError::DictionaryItemKeyExceedsLength)); + } + self.checked_memory_slice(key_ptr as usize, key_size as usize, |data| { + std::str::from_utf8(data).map(ToOwned::to_owned) + })? + }; + + let dictionary_item_key = if let Ok(item_key) = dictionary_item_key_bytes { item_key } else { return Ok(Err(ApiError::InvalidDictionaryItemKey)); From 11c455ba46cebbb0c3d979accfcb7653ffb885da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Wed, 5 Jul 2023 18:01:23 +0200 Subject: [PATCH 02/41] Add a [patch] entry to use fixed parity-wasm. This also fixes the issue of parity-wasm being pulled in by wasmi; with this commit parity-wasm is pulled from git across the board.
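For illustration, the zero-copy pattern that PATCH 01 builds on can be sketched in isolation. The sketch below is not code from the patch; it assumes only wasmi 0.13's `MemoryRef::with_direct_access` (which the patch itself uses) and a hypothetical caller-supplied `decode` closure:

use wasmi::MemoryRef;

// Minimal sketch of the zero-copy access pattern: instead of the deprecated
// `MemoryInstance::get`, which returns a freshly allocated `Vec<u8>`, borrow
// the validated region of linear memory and decode it in place.
fn read_from_wasm_memory<T>(
    memory: &MemoryRef,
    offset: usize,
    size: usize,
    decode: impl FnOnce(&[u8]) -> T,
) -> Result<T, wasmi::Error> {
    memory.with_direct_access(|buffer: &[u8]| {
        // Bounds-check `offset + size` before taking the slice, mirroring the
        // check performed by `checked_memory_slice` in this patch series.
        let end = offset
            .checked_add(size)
            .filter(|&end| end <= buffer.len())
            .ok_or_else(|| {
                wasmi::Error::Memory(format!(
                    "trying to access region [{}..{}] in memory [0..{}]",
                    offset,
                    offset.saturating_add(size),
                    buffer.len()
                ))
            })?;
        // The slice borrows directly from linear memory; nothing is copied
        // unless `decode` itself chooses to allocate.
        Ok(decode(&buffer[offset..end]))
    })
}

Compared with the deprecated `memory.get(ptr, size)`, which always returns an owned `Vec<u8>`, a caller can now pass e.g. `|data| bytesrepr::deserialize_from_slice(data)` and pay only for whatever allocations the deserializer itself performs.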
--- Cargo.lock | 19 ++++++------------- Cargo.toml | 3 +++ execution_engine_testing/tests/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 491c3c9ce8..ee60b77051 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,7 +444,7 @@ dependencies = [ "num-rational", "num-traits", "once_cell", - "parity-wasm 0.41.0", + "parity-wasm", "rand 0.8.5", "regex", "serde", @@ -484,7 +484,7 @@ dependencies = [ "num-traits", "num_cpus", "once_cell", - "parity-wasm 0.45.0", + "parity-wasm", "proptest", "rand 0.8.5", "rand_chacha 0.3.1", @@ -716,7 +716,7 @@ checksum = "b49e4ef1382d48c312809fe8f09d0c7beb434a74f5026c5f12efe384df51ca42" dependencies = [ "byteorder", "log", - "parity-wasm 0.45.0", + "parity-wasm", ] [[package]] @@ -3160,17 +3160,10 @@ dependencies = [ "casper-types", ] -[[package]] -name = "parity-wasm" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" - [[package]] name = "parity-wasm" version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" +source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" [[package]] name = "parking_lot" @@ -5429,7 +5422,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" dependencies = [ - "parity-wasm 0.45.0", + "parity-wasm", "wasmi-validation", "wasmi_core", ] @@ -5440,7 +5433,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" dependencies = [ - "parity-wasm 0.45.0", + "parity-wasm", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 3c4773e543..ec6b18c2dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,3 +41,6 @@ lto = true [profile.bench] codegen-units = 1 lto = true + +[patch.crates-io] +parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } diff --git a/execution_engine_testing/tests/Cargo.toml b/execution_engine_testing/tests/Cargo.toml index 1246c0e444..e9aee94e1f 100644 --- a/execution_engine_testing/tests/Cargo.toml +++ b/execution_engine_testing/tests/Cargo.toml @@ -13,7 +13,7 @@ casper-types = { path = "../../types", features = ["datasize", "json-schema"] } clap = "2" fs_extra = "1.2.0" log = "0.4.8" -parity-wasm = "0.41.0" +parity-wasm = "0.45.0" rand = "0.8.3" serde = "1" serde_json = "1" From dc2f8a20b38d823693fcede7a39d715f0e72a7e5 Mon Sep 17 00:00:00 2001 From: Alexandru Sardan Date: Tue, 11 Jul 2023 18:27:43 +0000 Subject: [PATCH 03/41] Add test for network recovery after at least 1/3 validators go down If more than 1/3 of the validator weight leaves the network, the network will stop producing blocks. When validators join back again such that more than 2/3 of weight is reached, the network should start producing blocks again. Add a test that checks this scenario. 
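The 1/3 and 2/3 figures above are plain weight arithmetic. As a hypothetical back-of-the-envelope sketch (not node code), the quorum condition the test exercises looks like this, assuming the ten equal-weight validators configured below:

// Hypothetical helper, not taken from the node: block production requires
// strictly more than 2/3 of the total validator weight to be online.
// Integer arithmetic avoids floating-point rounding at the boundary.
fn can_make_progress(online_weight: u64, total_weight: u64) -> bool {
    3 * online_weight > 2 * total_weight
}

fn main() {
    // Ten validators of equal weight, as in the scenario's accounts.toml.
    assert!(!can_make_progress(5, 10)); // nodes 6-10 stopped: chain stalls
    assert!(can_make_progress(7, 10)); // nodes 6 and 7 restarted: quorum again
}

Stopping five of the ten validators leaves only half of the weight online, so the chain stalls; restarting nodes 6 and 7 brings 7/10 of the weight back online, which clears the strict two-thirds threshold so block production resumes.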
Signed-off-by: Alexandru Sardan --- ci/nightly-test.sh | 1 + ...lidators_disconnect.accounts.toml.override | 154 ++++++++++++++++++ ...idators_disconnect.chainspec.toml.override | 2 + utils/nctl/sh/scenarios/common/itst.sh | 8 + .../sh/scenarios/validators_disconnect.sh | 101 ++++++++++++ 5 files changed, 266 insertions(+) create mode 100644 utils/nctl/sh/scenarios/accounts_toml/validators_disconnect.accounts.toml.override create mode 100644 utils/nctl/sh/scenarios/chainspecs/validators_disconnect.chainspec.toml.override create mode 100644 utils/nctl/sh/scenarios/validators_disconnect.sh diff --git a/ci/nightly-test.sh b/ci/nightly-test.sh index de40d55e5d..9ad96c5b1b 100755 --- a/ci/nightly-test.sh +++ b/ci/nightly-test.sh @@ -114,6 +114,7 @@ start_run_teardown "sync_test.sh timeout=500" start_run_teardown "gov96.sh" start_run_teardown "swap_validator_set.sh" start_run_teardown "sync_upgrade_test.sh node=6 era=5 timeout=500" +start_run_teardown "validators_disconnect.sh" # Without start_run_teardown - these ones perform their own assets setup, network start and teardown source "$SCENARIOS_DIR/upgrade_after_emergency_upgrade_test_pre_1.5.sh" source "$SCENARIOS_DIR/regression_3976.sh" diff --git a/utils/nctl/sh/scenarios/accounts_toml/validators_disconnect.accounts.toml.override b/utils/nctl/sh/scenarios/accounts_toml/validators_disconnect.accounts.toml.override new file mode 100644 index 0000000000..322951c407 --- /dev/null +++ b/utils/nctl/sh/scenarios/accounts_toml/validators_disconnect.accounts.toml.override @@ -0,0 +1,154 @@ +# FAUCET. +[[accounts]] +public_key = "PBK_FAUCET" +balance = "1000000000000000000000000000000000" + +# VALIDATOR 1. +[[accounts]] +public_key = "PBK_V1" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 2. +[[accounts]] +public_key = "PBK_V2" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 3. +[[accounts]] +public_key = "PBK_V3" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 4. +[[accounts]] +public_key = "PBK_V4" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 5. +[[accounts]] +public_key = "PBK_V5" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 6. +[[accounts]] +public_key = "PBK_V6" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 7. +[[accounts]] +public_key = "PBK_V7" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 8. +[[accounts]] +public_key = "PBK_V8" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 9. +[[accounts]] +public_key = "PBK_V9" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# VALIDATOR 10. +[[accounts]] +public_key = "PBK_V10" +balance = "1000000000000000000000000000000000" + +[accounts.validator] +bonded_amount = "1000" +delegation_rate = 1 + +# USER 1. 
+[[delegators]] +validator_public_key = "PBK_V1" +delegator_public_key = "PBK_U1" +balance = "1000000000000000000000000000000000" +delegated_amount = "1" + +# USER 2. +[[delegators]] +validator_public_key = "PBK_V2" +delegator_public_key = "PBK_U2" +balance = "1000000000000000000000000000000000" +delegated_amount = "1" + +# USER 3. +[[delegators]] +validator_public_key = "PBK_V3" +delegator_public_key = "PBK_U3" +balance = "1000000000000000000000000000000000" +delegated_amount = "1" + +# USER 4. +[[delegators]] +validator_public_key = "PBK_V4" +delegator_public_key = "PBK_U4" +balance = "1000000000000000000000000000000000" +delegated_amount = "1" + +# USER 5. +[[delegators]] +validator_public_key = "PBK_V5" +delegator_public_key = "PBK_U5" +balance = "1000000000000000000000000000000000" +delegated_amount = "1" + +# USER 6. +[[accounts]] +public_key = "PBK_U6" +balance = "1000000000000000000000000000000000" + +# USER 7. +[[accounts]] +public_key = "PBK_U7" +balance = "1000000000000000000000000000000000" + +# USER 8. +[[accounts]] +public_key = "PBK_U8" +balance = "1000000000000000000000000000000000" + +# USER 9. +[[accounts]] +public_key = "PBK_U9" +balance = "1000000000000000000000000000000000" + +# USER 10. +[[accounts]] +public_key = "PBK_U10" +balance = "1000000000000000000000000000000000" diff --git a/utils/nctl/sh/scenarios/chainspecs/validators_disconnect.chainspec.toml.override b/utils/nctl/sh/scenarios/chainspecs/validators_disconnect.chainspec.toml.override new file mode 100644 index 0000000000..b967865e40 --- /dev/null +++ b/utils/nctl/sh/scenarios/chainspecs/validators_disconnect.chainspec.toml.override @@ -0,0 +1,2 @@ +[core] +validator_slots = 10 \ No newline at end of file diff --git a/utils/nctl/sh/scenarios/common/itst.sh b/utils/nctl/sh/scenarios/common/itst.sh index 60b10a2a22..ff093ed036 100644 --- a/utils/nctl/sh/scenarios/common/itst.sh +++ b/utils/nctl/sh/scenarios/common/itst.sh @@ -225,6 +225,14 @@ function do_await_era_change() { nctl-await-n-eras offset="$ERA_COUNT" sleep_interval='5.0' } +function do_await_era_change_with_timeout() { + # allow chain height to grow + local ERA_COUNT=${1:-"1"} + local TIME_OUT=${2:-''} + log_step "awaiting $ERA_COUNT eras…" + nctl-await-n-eras offset="$ERA_COUNT" sleep_interval='5.0' timeout="$TIME_OUT" +} + function check_current_era { local NODE_ID=${1:-$(get_node_for_dispatch)} local ERA="null" diff --git a/utils/nctl/sh/scenarios/validators_disconnect.sh b/utils/nctl/sh/scenarios/validators_disconnect.sh new file mode 100644 index 0000000000..6dea53709a --- /dev/null +++ b/utils/nctl/sh/scenarios/validators_disconnect.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash + +source "$NCTL"/sh/utils/main.sh +source "$NCTL"/sh/views/utils.sh +source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh +source "$NCTL"/sh/scenarios/common/itst.sh + +# Exit if any of the commands fail. +set -e + +function main() { + log "------------------------------------------------------------" + log "Starting Scenario: Half of validators disconnecting" + log "------------------------------------------------------------" + + # 0. Start the rest of the validator nodes + log_step "Starting nodes 6-10" + nctl-start node=6 + nctl-start node=7 + nctl-start node=8 + nctl-start node=9 + nctl-start node=10 + # 1. Wait for the genesis era to complete + do_await_genesis_era_to_complete + # 2. Allow the chain to progress + do_await_era_change 1 + # 3. Verify all nodes are in sync + parallel_check_network_sync 1 10 + # 4. 
Stop half of the validators + log_step "Stopping nodes 6-10" + nctl-stop node=6 + nctl-stop node=7 + nctl-stop node=8 + nctl-stop node=9 + nctl-stop node=10 + # 5. Wait for a period longer than the dead air interval + sleep_and_display_reactor_state '260.0' + # 6. Start 2 previously stopped validator node. + # Now the network should have 7/10 active and start progressing. + log_step "Starting nodes 6, 7" + nctl-start node=6 + nctl-start node=7 + # 7. Check if the network is progressing + do_await_era_change_with_timeout 1 "500" + source "$NCTL"/sh/scenarios/common/health_checks.sh \ + errors=0 \ + equivocators=0 \ + doppels=0 \ + crashes=0 \ + restarts=2 \ + ejections=0 + + log "------------------------------------------------------------" + log "Scenario half of validators disconnecting complete" + log "------------------------------------------------------------" +} + +function sleep_and_display_reactor_state() { + local TIME_OUT=${1:-'360.0'} + local NODE_ID=${2:-"1"} + local SLEEP_INTERVAL='10.0' + + log_step "Waiting $TIME_OUT seconds to pass…" + while true + do + local REACTOR_STATUS=$(nctl-view-node-status node=1 | tail -n +2) + local REACTOR_STATE=$(echo $REACTOR_STATUS | jq '.reactor_state') + local HIGHEST_BLOCK=$(echo $REACTOR_STATUS | jq '.available_block_range.high' ) + LOG_OUTPUT="reactor state for node $NODE_ID = $REACTOR_STATE; highest block = $HIGHEST_BLOCK :: sleeping $SLEEP_INTERVAL seconds" + + if [ "$EMIT_LOG" = true ] && [ ! -z "$TIME_OUT" ]; then + log "$LOG_OUTPUT :: sleep time = $TIME_OUT seconds" + elif [ "$EMIT_LOG" = true ]; then + log "$LOG_OUTPUT" + fi + + sleep "$SLEEP_INTERVAL" + + if [ ! -z "$TIME_OUT" ]; then + # Using jq since it's required by NCTL anyway to do this floating point arith + # ... done to maintain backwards compatibility + TIME_OUT=$(jq -n "$TIME_OUT-$SLEEP_INTERVAL") + + if [ "$TIME_OUT" -le "0" ]; then + log "Finished waiting" + break + fi + else + log "Finished waiting" + break + fi + done +} + +# ---------------------------------------------------------------- +# ENTRY POINT +# ---------------------------------------------------------------- + +STEP=0 + +main From cf20a8f0851ba4218629854964684792caac3106 Mon Sep 17 00:00:00 2001 From: Alexandru Sardan Date: Thu, 13 Jul 2023 18:27:54 +0000 Subject: [PATCH 04/41] nctl/validators_disconnect: add step to check network stalled Signed-off-by: Alexandru Sardan --- utils/nctl/sh/scenarios/common/itst.sh | 25 +++++++++++++++++++ utils/nctl/sh/scenarios/itst02.sh | 25 ------------------- .../sh/scenarios/validators_disconnect.sh | 10 ++++---- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/utils/nctl/sh/scenarios/common/itst.sh b/utils/nctl/sh/scenarios/common/itst.sh index ff093ed036..1d209275a7 100644 --- a/utils/nctl/sh/scenarios/common/itst.sh +++ b/utils/nctl/sh/scenarios/common/itst.sh @@ -588,3 +588,28 @@ function delegate_to() { delegator="$ACCOUNT_ID" \ validator="$NODE_ID" } + +function assert_chain_stalled() { + # Function checks that the two remaining nodes' LFB, checked + # n seconds apart, doesn't progress + log_step "ensuring chain stalled" + local SLEEP_TIME=${1} + # Sleep 5 seconds to allow for final message propagation. + sleep 5 + local LFB_1_PRE=$(do_read_lfb_hash 1) + local LFB_2_PRE=$(do_read_lfb_hash 2) + log "Sleeping ${SLEEP_TIME}s..." + sleep $SLEEP_TIME + local LFB_1_POST=$(do_read_lfb_hash 1) + local LFB_2_POST=$(do_read_lfb_hash 2) + + if [ "$LFB_1_PRE" != "$LFB_1_POST" ] && [ "$LFB_2_PRE" != "$LFB_2_POST" ]; then + log "Error: Chain progressed."
+ exit 1 + else + STALLED_LFB=$LFB_1_POST + log "node-1 LFB: $LFB_1_PRE = $LFB_1_POST" + log "node-2 LFB: $LFB_2_PRE = $LFB_2_POST" + log "Stall successfully detected, continuing..." + fi +} \ No newline at end of file diff --git a/utils/nctl/sh/scenarios/itst02.sh b/utils/nctl/sh/scenarios/itst02.sh index b3d92ad7a4..045223d398 100755 --- a/utils/nctl/sh/scenarios/itst02.sh +++ b/utils/nctl/sh/scenarios/itst02.sh @@ -73,31 +73,6 @@ function assert_chain_progressed() { fi } -function assert_chain_stalled() { - # Fucntion checks that the two remaining node's LFB checked - # n-seconds apart doesnt progress - log_step "ensuring chain stalled" - local SLEEP_TIME=${1} - # Sleep 5 seconds to allow for final message propagation. - sleep 5 - local LFB_1_PRE=$(do_read_lfb_hash 1) - local LFB_2_PRE=$(do_read_lfb_hash 2) - log "Sleeping ${SLEEP_TIME}s..." - sleep $SLEEP_TIME - local LFB_1_POST=$(do_read_lfb_hash 1) - local LFB_2_POST=$(do_read_lfb_hash 2) - - if [ "$LFB_1_PRE" != "$LFB_1_POST" ] && [ "$LFB_2_PRE" != "$LFB_2_POST" ]; then - log "Error: Chain progressed." - exit 1 - else - STALLED_LFB=$LFB_1_POST - log "node-1 LFB: $LFB_1_PRE = $LFB_1_POST" - log "node-2 LFB: $LFB_2_PRE = $LFB_2_POST" - log "Stall successfully detected, continuing..." - fi -} - # ---------------------------------------------------------------- # ENTRY POINT # ---------------------------------------------------------------- diff --git a/utils/nctl/sh/scenarios/validators_disconnect.sh b/utils/nctl/sh/scenarios/validators_disconnect.sh index 6dea53709a..8b62664f65 100644 --- a/utils/nctl/sh/scenarios/validators_disconnect.sh +++ b/utils/nctl/sh/scenarios/validators_disconnect.sh @@ -22,20 +22,20 @@ function main() { nctl-start node=10 # 1. Wait for the genesis era to complete do_await_genesis_era_to_complete - # 2. Allow the chain to progress - do_await_era_change 1 - # 3. Verify all nodes are in sync + # 2. Verify all nodes are in sync parallel_check_network_sync 1 10 - # 4. Stop half of the validators + # 3. Stop half of the validators log_step "Stopping nodes 6-10" nctl-stop node=6 nctl-stop node=7 nctl-stop node=8 nctl-stop node=9 nctl-stop node=10 + # 4. Assert that the chain actually stalled + assert_chain_stalled "30" # 5. Wait for a period longer than the dead air interval sleep_and_display_reactor_state '260.0' - # 6. Start 2 previously stopped validator node. + # 6. Start 2 previously stopped validator nodes. # Now the network should have 7/10 active and start progressing. 
log_step "Starting nodes 6, 7" nctl-start node=6 From 17d89bf06aa6b64b8caaff67960fcf39cb264f4f Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Fri, 1 Sep 2023 22:20:19 +0100 Subject: [PATCH 05/41] add examples for public key variants to JSON schema --- Makefile | 2 +- .../components/event_stream_server/tests.rs | 5 +- node/src/components/rest_server.rs | 20 +- node/src/components/rpc_server.rs | 7 +- node/src/testing.rs | 6 +- resources/test/rest_schema_status.json | 17 + .../test/rest_schema_validator_changes.json | 17 + resources/test/rpc_schema_hashing.json | 8064 ++++++++--------- resources/test/sse_data_schema.json | 19 +- types/src/crypto/asymmetric_key.rs | 23 + 10 files changed, 4105 insertions(+), 4075 deletions(-) diff --git a/Makefile b/Makefile index eead9dbeed..8431368701 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,7 @@ lint-smart-contracts: .PHONY: audit-rs audit-rs: - $(CARGO) audit + $(CARGO) audit --ignore RUSTSEC-2022-0093 .PHONY: audit-as audit-as: diff --git a/node/src/components/event_stream_server/tests.rs b/node/src/components/event_stream_server/tests.rs index 0639228ad4..2a21172be2 100644 --- a/node/src/components/event_stream_server/tests.rs +++ b/node/src/components/event_stream_server/tests.rs @@ -1194,5 +1194,8 @@ fn schema_test() { "{}/../resources/test/sse_data_schema.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for!(SseData)); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(SseData)).unwrap(), + ); } diff --git a/node/src/components/rest_server.rs b/node/src/components/rest_server.rs index 37ea51f426..f733081098 100644 --- a/node/src/components/rest_server.rs +++ b/node/src/components/rest_server.rs @@ -370,7 +370,10 @@ mod schema_tests { "{}/../resources/test/rest_schema_status.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for!(GetStatusResult)); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(GetStatusResult)).unwrap(), + ); } #[test] @@ -379,7 +382,10 @@ mod schema_tests { "{}/../resources/test/rest_schema_validator_changes.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for!(GetValidatorChangesResult)); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(GetValidatorChangesResult)).unwrap(), + ); } #[test] @@ -388,7 +394,10 @@ mod schema_tests { "{}/../resources/test/rest_schema_rpc_schema.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for!(OpenRpcSchema)); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(OpenRpcSchema)).unwrap(), + ); } #[test] @@ -397,6 +406,9 @@ mod schema_tests { "{}/../resources/test/rest_schema_chainspec_bytes.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for!(GetChainspecResult)); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(GetChainspecResult)).unwrap(), + ); } } diff --git a/node/src/components/rpc_server.rs b/node/src/components/rpc_server.rs index 7c55c816c1..45f538681c 100644 --- a/node/src/components/rpc_server.rs +++ b/node/src/components/rpc_server.rs @@ -477,8 +477,6 @@ where #[cfg(test)] mod tests { - use schemars::schema_for_value; - use crate::{rpcs::docs::OPEN_RPC_SCHEMA, testing::assert_schema}; #[test] @@ -487,6 +485,9 @@ mod tests { "{}/../resources/test/rpc_schema_hashing.json", env!("CARGO_MANIFEST_DIR") ); - assert_schema(schema_path, schema_for_value!(OPEN_RPC_SCHEMA.clone())); + assert_schema( + schema_path, + 
serde_json::to_string_pretty(&*OPEN_RPC_SCHEMA).unwrap(), + ); } } diff --git a/node/src/testing.rs b/node/src/testing.rs index 8c98ba557f..b4ed616434 100644 --- a/node/src/testing.rs +++ b/node/src/testing.rs @@ -26,7 +26,6 @@ use derive_more::From; use futures::channel::oneshot; use once_cell::sync::Lazy; use rand::Rng; -use schemars::schema::RootSchema; use serde_json::Value; use tempfile::TempDir; use tokio::runtime::{self, Runtime}; @@ -395,13 +394,12 @@ pub(crate) fn create_not_expired_deploy(now: Timestamp, test_rng: &mut TestRng) ) } -/// Assert that the file at `schema_path` matches the provided `RootSchema`, which can be derived +/// Assert that the file at `schema_path` matches the provided `actual_schema`, which can be derived /// from `schemars::schema_for!` or `schemars::schema_for_value!`, for example. This method will /// create a temporary file with the actual schema and print the location if it fails. -pub fn assert_schema(schema_path: String, actual_schema: RootSchema) { +pub fn assert_schema(schema_path: String, actual_schema: String) { let expected_schema = fs::read_to_string(&schema_path).unwrap(); let expected_schema: Value = serde_json::from_str(&expected_schema).unwrap(); - let actual_schema = serde_json::to_string_pretty(&actual_schema).unwrap(); let mut temp_file = tempfile::Builder::new() .suffix(".json") .tempfile_in(env!("OUT_DIR")) diff --git a/resources/test/rest_schema_status.json b/resources/test/rest_schema_status.json index e8168fbc06..6ea345c68f 100644 --- a/resources/test/rest_schema_status.json +++ b/resources/test/rest_schema_status.json @@ -218,6 +218,23 @@ }, "PublicKey": { "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], "type": "string" }, "TimeDiff": { diff --git a/resources/test/rest_schema_validator_changes.json b/resources/test/rest_schema_validator_changes.json index 165516fca8..23adedb907 100644 --- a/resources/test/rest_schema_validator_changes.json +++ b/resources/test/rest_schema_validator_changes.json @@ -50,6 +50,23 @@ }, "PublicKey": { "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. 
Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], "type": "string" }, "JsonValidatorStatusChange": { diff --git a/resources/test/rpc_schema_hashing.json b/resources/test/rpc_schema_hashing.json index 13cb9a46db..99dea9ea0f 100644 --- a/resources/test/rpc_schema_hashing.json +++ b/resources/test/rpc_schema_hashing.json @@ -1,4781 +1,4723 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "OpenRpcSchema", - "examples": [ + "openrpc": "1.0.0-rc1", + "info": { + "version": "1.5.2", + "title": "Client API of Casper Node", + "description": "This describes the JSON-RPC 2.0 API of a node on the Casper network.", + "contact": { + "name": "CasperLabs", + "url": "https://casperlabs.io" + }, + "license": { + "name": "CasperLabs Open Source License Version 1.0", + "url": "https://raw.githubusercontent.com/CasperLabs/casper-node/master/LICENSE" + } + }, + "servers": [ { - "openrpc": "1.0.0-rc1", - "info": { - "version": "1.5.2", - "title": "Client API of Casper Node", - "description": "This describes the JSON-RPC 2.0 API of a node on the Casper network.", - "contact": { - "name": "CasperLabs", - "url": "https://casperlabs.io" - }, - "license": { - "name": "CasperLabs Open Source License Version 1.0", - "url": "https://raw.githubusercontent.com/CasperLabs/casper-node/master/LICENSE" - } - }, - "servers": [ + "name": "any Casper Network node", + "url": "http://IP:PORT/rpc/" + } + ], + "methods": [ + { + "name": "account_put_deploy", + "summary": "receives a Deploy to be executed by the network", + "params": [ { - "name": "any Casper Network node", - "url": "http://IP:PORT/rpc/" + "name": "deploy", + "schema": { + "description": "The `Deploy`.", + "$ref": "#/components/schemas/Deploy" + }, + "required": true } ], - "methods": [ + "result": { + "name": "account_put_deploy_result", + "schema": { + "description": "Result for \"account_put_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy_hash" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy_hash": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" + } + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "account_put_deploy", - "summary": "receives a Deploy to be executed by the network", + "name": "account_put_deploy_example", "params": [ { "name": "deploy", - "schema": { - "description": "The `Deploy`.", - "$ref": "#/components/schemas/Deploy" - }, - "required": true + "value": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" + }, + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + 
] + } + }, + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } + }, + "approvals": [ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] + } } ], "result": { - "name": "account_put_deploy_result", - "schema": { - "description": "Result for \"account_put_deploy\" RPC response.", - "type": "object", - "required": [ - "api_version", - "deploy_hash" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "deploy_hash": { - "description": "The deploy hash.", - "$ref": "#/components/schemas/DeployHash" - } - }, - "additionalProperties": false + "name": "account_put_deploy_example_result", + "value": { + "api_version": "1.5.2", + "deploy_hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" } + } + } + ] + }, + { + "name": "info_get_deploy", + "summary": "returns a Deploy from the network", + "params": [ + { + "name": "deploy_hash", + "schema": { + "description": "The deploy hash.", + "$ref": "#/components/schemas/DeployHash" }, - "examples": [ - { - "name": "account_put_deploy_example", - "params": [ - { - "name": "deploy", - "value": { - "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", - "header": { - "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "timestamp": "2020-11-17T00:39:24.072Z", - "ttl": "1h", - "gas_price": 1, - "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", - "dependencies": [ - "0101010101010101010101010101010101010101010101010101010101010101" - ], - "chain_name": "casper-example" - }, - "payment": { - "StoredContractByName": { - "name": "casper-example", - "entry_point": "example-entry-point", - "args": [ - [ - "amount", - { - "cl_type": "I32", - "bytes": "e8030000", - "parsed": 1000 - } - ] - ] - } - }, - "session": { - "Transfer": { - "args": [ - [ - "amount", - { - "cl_type": "I32", - "bytes": "e8030000", - "parsed": 1000 - } - ] - ] - } - }, - "approvals": [ - { - "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" - } - ] - } - } - ], - "result": { - "name": "account_put_deploy_example_result", - "value": { - "api_version": "1.5.2", - "deploy_hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" - } + "required": true + }, + { + "name": "finalized_approvals", + "schema": { + "description": "Whether to return the deploy with the finalized approvals substituted. 
If `false` or omitted, returns the deploy with the approvals that were originally received by the node.", + "default": false, + "type": "boolean" + }, + "required": false + } + ], + "result": { + "name": "info_get_deploy_result", + "schema": { + "description": "Result for \"info_get_deploy\" RPC response.", + "type": "object", + "required": [ + "api_version", + "deploy", + "execution_results" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "deploy": { + "description": "The deploy.", + "$ref": "#/components/schemas/Deploy" + }, + "execution_results": { + "description": "The map of block hash to execution result.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonExecutionResult" } + }, + "block_hash": { + "description": "The hash of this deploy's block.", + "$ref": "#/components/schemas/BlockHash" + }, + "block_height": { + "description": "The height of this deploy's block.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "info_get_deploy", - "summary": "returns a Deploy from the network", + "name": "info_get_deploy_example", "params": [ { "name": "deploy_hash", - "schema": { - "description": "The deploy hash.", - "$ref": "#/components/schemas/DeployHash" - }, - "required": true + "value": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" }, { "name": "finalized_approvals", - "schema": { - "description": "Whether to return the deploy with the finalized approvals substituted. If `false` or omitted, returns the deploy with the approvals that were originally received by the node.", - "default": false, - "type": "boolean" - }, - "required": false + "value": true } ], "result": { - "name": "info_get_deploy_result", - "schema": { - "description": "Result for \"info_get_deploy\" RPC response.", - "type": "object", - "required": [ - "api_version", - "deploy", - "execution_results" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "deploy": { - "description": "The deploy.", - "$ref": "#/components/schemas/Deploy" + "name": "info_get_deploy_example_result", + "value": { + "api_version": "1.5.2", + "deploy": { + "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", + "header": { + "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "timestamp": "2020-11-17T00:39:24.072Z", + "ttl": "1h", + "gas_price": 1, + "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", + "dependencies": [ + "0101010101010101010101010101010101010101010101010101010101010101" + ], + "chain_name": "casper-example" }, - "execution_results": { - "description": "The map of block hash to execution result.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonExecutionResult" + "payment": { + "StoredContractByName": { + "name": "casper-example", + "entry_point": "example-entry-point", + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] } }, - "block_hash": { - "description": "The hash of this deploy's block.", - "$ref": "#/components/schemas/BlockHash" + "session": { + "Transfer": { + "args": [ + [ + "amount", + { + "cl_type": "I32", + "bytes": "e8030000", + "parsed": 1000 + } + ] + ] + } }, - "block_height": { - "description": "The height of this deploy's block.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - } + "approvals": 
[ + { + "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" + } + ] }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "info_get_deploy_example", - "params": [ - { - "name": "deploy_hash", - "value": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" - }, + "execution_results": [ { - "name": "finalized_approvals", - "value": true - } - ], - "result": { - "name": "info_get_deploy_example_result", - "value": { - "api_version": "1.5.2", - "deploy": { - "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", - "header": { - "account": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "timestamp": "2020-11-17T00:39:24.072Z", - "ttl": "1h", - "gas_price": 1, - "body_hash": "d53cf72d17278fd47d399013ca389c50d589352f1a12593c0b8e01872a641b50", - "dependencies": [ - "0101010101010101010101010101010101010101010101010101010101010101" - ], - "chain_name": "casper-example" - }, - "payment": { - "StoredContractByName": { - "name": "casper-example", - "entry_point": "example-entry-point", - "args": [ - [ - "amount", - { - "cl_type": "I32", - "bytes": "e8030000", - "parsed": 1000 - } - ] - ] - } - }, - "session": { - "Transfer": { - "args": [ - [ - "amount", - { - "cl_type": "I32", - "bytes": "e8030000", - "parsed": 1000 + "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "result": { + "Success": { + "effect": { + "operations": [ + { + "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + "kind": "Write" + }, + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "kind": "Read" + } + ], + "transforms": [ + { + "key": "uref-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb-007", + "transform": { + "AddUInt64": 8 } - ] - ] - } - }, - "approvals": [ - { - "signer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "signature": "014c1a89f92e29dd74fc648f741137d9caf4edba97c5f9799ce0c9aa6b0c9b58db368c64098603dbecef645774c05dff057cb1f91f2cf390bbacce78aa6f084007" - } - ] - }, - "execution_results": [ - { - "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "result": { - "Success": { - "effect": { - "operations": [ - { - "key": "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", - "kind": "Write" - }, - { - "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", - "kind": "Read" - } - ], - "transforms": [ - { - "key": "uref-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb-007", - "transform": { - "AddUInt64": 8 - } - }, - { - "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", - "transform": "Identity" - } - ] }, - "transfers": [ - "transfer-5959595959595959595959595959595959595959595959595959595959595959", - "transfer-8282828282828282828282828282828282828282828282828282828282828282" - ], - "cost": "123456" - } - } + { + "key": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + "transform": "Identity" + } + ] + }, + "transfers": [ + "transfer-5959595959595959595959595959595959595959595959595959595959595959", + "transfer-8282828282828282828282828282828282828282828282828282828282828282" + ], + "cost": "123456" } - ] + } } - } + ] } - ] + } + } + ] + }, + { + "name": 
"state_get_account_info", + "summary": "returns an Account from the network", + "params": [ + { + "name": "public_key", + "schema": { + "description": "The public key of the Account.", + "$ref": "#/components/schemas/PublicKey" + }, + "required": true }, { - "name": "state_get_account_info", - "summary": "returns an Account from the network", + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockIdentifier" + }, + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "state_get_account_info_result", + "schema": { + "description": "Result for \"state_get_account_info\" RPC response.", + "type": "object", + "required": [ + "account", + "api_version", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "account": { + "description": "The account.", + "$ref": "#/components/schemas/Account" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_account_info_example", "params": [ { "name": "public_key", - "schema": { - "description": "The public key of the Account.", - "$ref": "#/components/schemas/PublicKey" - }, - "required": true + "value": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" }, { "name": "block_identifier", - "schema": { - "description": "The block identifier.", - "anyOf": [ - { - "$ref": "#/components/schemas/BlockIdentifier" - }, - { - "type": "null" - } - ] - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "state_get_account_info_result", - "schema": { - "description": "Result for \"state_get_account_info\" RPC response.", - "type": "object", - "required": [ - "account", - "api_version", - "merkle_proof" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "account": { - "description": "The account.", - "$ref": "#/components/schemas/Account" - }, - "merkle_proof": { - "description": "The Merkle proof.", - "type": "string" + "name": "state_get_account_info_example_result", + "value": { + "api_version": "1.5.2", + "account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 } }, - "additionalProperties": false + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } + } + } + ] + }, + { + "name": "state_get_dictionary_item", + "summary": "returns an item from a Dictionary", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root", + "$ref": "#/components/schemas/Digest" }, - "examples": [ - { - "name": "state_get_account_info_example", - "params": [ - { - "name": "public_key", - "value": "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" - }, - { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "state_get_account_info_example_result", - "value": { - "api_version": "1.5.2", - "account": { - "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", - "named_keys": [], - "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", - "associated_keys": [ - { - "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", - "weight": 1 - } - ], - "action_thresholds": { - "deployment": 1, - "key_management": 1 - } - }, - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" - } - } - } - ] + "required": true }, { - "name": "state_get_dictionary_item", - "summary": "returns an item from a Dictionary", + "name": "dictionary_identifier", + "schema": { + "description": "The Dictionary query identifier.", + "$ref": "#/components/schemas/DictionaryIdentifier" + }, + "required": true + } + ], + "result": { + "name": "state_get_dictionary_item_result", + "schema": { + "description": "Result for \"state_get_dictionary_item\" RPC response.", + "type": "object", + "required": [ + 
"api_version", + "dictionary_key", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "dictionary_key": { + "description": "The key under which the value is stored.", + "type": "string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_dictionary_item_example", "params": [ { "name": "state_root_hash", - "schema": { - "description": "Hash of the state root", - "$ref": "#/components/schemas/Digest" - }, - "required": true + "value": "0808080808080808080808080808080808080808080808080808080808080808" }, { "name": "dictionary_identifier", - "schema": { - "description": "The Dictionary query identifier.", - "$ref": "#/components/schemas/DictionaryIdentifier" - }, - "required": true + "value": { + "URef": { + "seed_uref": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "dictionary_item_key": "a_unique_entry_identifier" + } + } } ], "result": { - "name": "state_get_dictionary_item_result", - "schema": { - "description": "Result for \"state_get_dictionary_item\" RPC response.", - "type": "object", - "required": [ - "api_version", - "dictionary_key", - "merkle_proof", - "stored_value" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "dictionary_key": { - "description": "The key under which the value is stored.", - "type": "string" - }, - "stored_value": { - "description": "The stored value.", - "$ref": "#/components/schemas/StoredValue" - }, - "merkle_proof": { - "description": "The Merkle proof.", - "type": "string" + "name": "state_get_dictionary_item_example_result", + "value": { + "api_version": "1.5.2", + "dictionary_key": "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": "0100000000000000", + "parsed": 1 } }, - "additionalProperties": false + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } + } + } + ] + }, + { + "name": "query_global_state", + "summary": "a query to global state using either a Block hash or state root hash", + "params": [ + { + "name": "state_identifier", + "schema": { + "description": "The identifier used for the query.", + "$ref": "#/components/schemas/GlobalStateIdentifier" }, - "examples": [ - { - "name": "state_get_dictionary_item_example", - "params": [ - { - "name": 
"state_root_hash", - "value": "0808080808080808080808080808080808080808080808080808080808080808" - }, - { - "name": "dictionary_identifier", - "value": { - "URef": { - "seed_uref": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", - "dictionary_item_key": "a_unique_entry_identifier" - } - } - } - ], - "result": { - "name": "state_get_dictionary_item_example_result", - "value": { - "api_version": "1.5.2", - "dictionary_key": "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f", - "stored_value": { - "CLValue": { - "cl_type": "U64", - "bytes": "0100000000000000", - "parsed": 1 - } - }, - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + "required": true + }, + { + "name": "key", + "schema": { + "description": "`casper_types::Key` as formatted string.", + "type": "string" + }, + "required": true + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "query_global_state_result", + "schema": { + "description": "Result for \"query_global_state\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_header": { + "description": "The block header if a Block hash was provided.", + "anyOf": [ + { + "$ref": "#/components/schemas/JsonBlockHeader" + }, + { + "type": "null" } - } + ] + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "query_global_state", - "summary": "a query to global state using either a Block hash or state root hash", + "name": "query_global_state_example", "params": [ { "name": "state_identifier", - "schema": { - "description": "The identifier used for the query.", - "$ref": "#/components/schemas/GlobalStateIdentifier" - }, - "required": true + "value": { + "BlockHash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } }, { "name": "key", - "schema": { - "description": "`casper_types::Key` as formatted string.", - "type": "string" - }, - "required": true + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" }, { "name": "path", - "schema": { - "description": "The path components starting from the key as base.", - "default": 
[], - "type": "array", - "items": { - "type": "string" - } - }, - "required": false + "value": [] } ], "result": { - "name": "query_global_state_result", - "schema": { - "description": "Result for \"query_global_state\" RPC response.", - "type": "object", - "required": [ - "api_version", - "merkle_proof", - "stored_value" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "block_header": { - "description": "The block header if a Block hash was provided.", - "anyOf": [ + "name": "query_global_state_example_result", + "value": { + "api_version": "1.5.2", + "block_header": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "cd502c5393a3c8b66d6979ad7857507c9baf5a8ba16ba99c28378d3a970fff42", + "random_bit": true, + "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "era_report": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "rewards": [ + { + "validator": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c", + "amount": 1000 + } + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" + ] + }, + "next_era_validator_weights": [ + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, { - "$ref": "#/components/schemas/JsonBlockHeader" + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" }, { - "type": "null" + "validator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "weight": "123" } ] }, - "stored_value": { - "description": "The stored value.", - "$ref": "#/components/schemas/StoredValue" - }, - "merkle_proof": { - "description": "The Merkle proof.", - "type": "string" - } + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0" }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "query_global_state_example", - "params": [ - { - "name": "state_identifier", - "value": { - "BlockHash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - }, - { - "name": "key", - "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" - }, - { - "name": "path", - "value": [] - } - ], - "result": { - "name": "query_global_state_example_result", - "value": { - "api_version": "1.5.2", - "block_header": { - "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", - "body_hash": "cd502c5393a3c8b66d6979ad7857507c9baf5a8ba16ba99c28378d3a970fff42", - "random_bit": true, - "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", - "era_end": { - "era_report": { - "equivocators": [ - "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" - ], - "rewards": [ - { - "validator": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c", - "amount": 1000 - } - ], - "inactive_validators": [ - "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" - ] - }, - "next_era_validator_weights": [ - { - "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", - "weight": "456" - }, - { - "validator": 
"018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", - "weight": "789" - }, - { - "validator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "weight": "123" - } - ] - }, - "timestamp": "2020-11-17T00:39:24.072Z", - "era_id": 1, - "height": 10, - "protocol_version": "1.0.0" - }, - "stored_value": { - "Account": { + "stored_value": { + "Account": { + "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", + "named_keys": [], + "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + "associated_keys": [ + { "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", - "named_keys": [], - "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", - "associated_keys": [ - { - "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", - "weight": 1 - } - ], - "action_thresholds": { - "deployment": 1, - "key_management": 1 - } + "weight": 1 } - }, - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } } - } + }, + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } - ] + } + } + ] + }, + { + "name": "query_balance", + "summary": "query for a balance using a purse identifier and a state identifier", + "params": [ + { + "name": "purse_identifier", + "schema": { + "description": "The identifier to obtain the purse corresponding to balance query.", + "$ref": "#/components/schemas/PurseIdentifier" + }, + "required": true }, { - "name": "query_balance", - "summary": "query for a balance using a purse identifier and a state 
identifier", - "params": [ - { - "name": "purse_identifier", - "schema": { - "description": "The identifier to obtain the purse corresponding to balance query.", - "$ref": "#/components/schemas/PurseIdentifier" + "name": "state_identifier", + "schema": { + "description": "The state identifier used for the query, if none is passed the tip of the chain will be used.", + "anyOf": [ + { + "$ref": "#/components/schemas/GlobalStateIdentifier" }, - "required": true + { + "type": "null" + } + ] + }, + "required": false + } + ], + "result": { + "name": "query_balance_result", + "schema": { + "description": "Result for \"query_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" }, + "balance": { + "description": "The balance represented in motes.", + "$ref": "#/components/schemas/U512" + } + } + } + }, + "examples": [ + { + "name": "query_balance_example", + "params": [ { "name": "state_identifier", - "schema": { - "description": "The state identifier used for the query, if none is passed the tip of the chain will be used.", - "anyOf": [ - { - "$ref": "#/components/schemas/GlobalStateIdentifier" - }, - { - "type": "null" - } - ] - }, - "required": false + "value": { + "BlockHash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } + }, + { + "name": "purse_identifier", + "value": { + "main_purse_under_account_hash": "account-hash-0909090909090909090909090909090909090909090909090909090909090909" + } } ], "result": { - "name": "query_balance_result", - "schema": { - "description": "Result for \"query_balance\" RPC response.", - "type": "object", - "required": [ - "api_version", - "balance" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "balance": { - "description": "The balance represented in motes.", - "$ref": "#/components/schemas/U512" - } - } + "name": "query_balance_example_result", + "value": { + "api_version": "1.5.2", + "balance": "123456" + } + } + } + ] + }, + { + "name": "info_get_peers", + "summary": "returns a list of peers connected to the node", + "params": [], + "result": { + "name": "info_get_peers_result", + "schema": { + "description": "Result for \"info_get_peers\" RPC response.", + "type": "object", + "required": [ + "api_version", + "peers" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/PeersMap" } }, - "examples": [ - { - "name": "query_balance_example", - "params": [ + "additionalProperties": false + } + }, + "examples": [ + { + "name": "info_get_peers_example", + "params": [], + "result": { + "name": "info_get_peers_example_result", + "value": { + "api_version": "1.5.2", + "peers": [ { - "name": "state_identifier", - "value": { - "BlockHash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } + ] + } + } + } + ] + }, + { + "name": "info_get_status", + "summary": "returns the current status of the node", + "params": [], + "result": { + "name": "info_get_status_result", + "schema": { + "description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", 
+ "peers", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "peers": { + "description": "The node ID and network address of each connected peer.", + "$ref": "#/components/schemas/PeersMap" + }, + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "$ref": "#/components/schemas/Digest" + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/components/schemas/MinimalBlockInfo" }, { - "name": "purse_identifier", - "value": { - "main_purse_under_account_hash": "account-hash-0909090909090909090909090909090909090909090909090909090909090909" - } + "type": "null" } - ], - "result": { - "name": "query_balance_example_result", - "value": { - "api_version": "1.5.2", - "balance": "123456" + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" } - } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/components/schemas/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "$ref": "#/components/schemas/TimeDiff" + }, + "reactor_state": { + "description": "The current state of node reactor.", + "$ref": "#/components/schemas/ReactorState" + }, + "last_progress": { + "description": "Timestamp of the last recorded progress in the reactor.", + "$ref": "#/components/schemas/Timestamp" + }, + "available_block_range": { + "description": "The available block range in storage.", + "$ref": "#/components/schemas/AvailableBlockRange" + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "$ref": "#/components/schemas/BlockSynchronizerStatus" } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "info_get_peers", - "summary": "returns a list of peers connected to the node", + "name": "info_get_status_example", "params": [], "result": { - "name": "info_get_peers_result", - "schema": { - "description": "Result for \"info_get_peers\" RPC response.", - "type": "object", - "required": [ - "api_version", - "peers" + "name": "info_get_status_example_result", + "value": { + "peers": [ + { + "node_id": "tls:0101..0101", + "address": "127.0.0.1:54321" + } ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" + "api_version": "1.5.2", + "build_version": "1.0.0-xxxxxxxxx@DEBUG", + "chainspec_name": "casper-example", + "starting_state_root_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_added_block_info": { + "hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "creator": 
"01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" + }, + "our_public_signing_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "round_length": "1m 5s 536ms", + "next_upgrade": { + "activation_point": 42, + "protocol_version": "2.0.1" + }, + "uptime": "13s", + "reactor_state": "Initialize", + "last_progress": "1970-01-01T00:00:00.000Z", + "available_block_range": { + "low": 0, + "high": 0 + }, + "block_sync": { + "historical": { + "block_hash": "16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e", + "block_height": 40, + "acquisition_state": "have strict finality(40) for: block hash 16dd..c55e" }, - "peers": { - "description": "The node ID and network address of each connected peer.", - "$ref": "#/components/schemas/PeersMap" + "forward": { + "block_hash": "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983", + "block_height": 6701, + "acquisition_state": "have block body(6701) for: block hash 5990..4983" } - }, - "additionalProperties": false + } } - }, - "examples": [ - { - "name": "info_get_peers_example", - "params": [], - "result": { - "name": "info_get_peers_example_result", - "value": { - "api_version": "1.5.2", - "peers": [ - { - "node_id": "tls:0101..0101", - "address": "127.0.0.1:54321" - } - ] - } + } + } + ] + }, + { + "name": "info_get_validator_changes", + "summary": "returns status changes of active validators", + "params": [], + "result": { + "name": "info_get_validator_changes_result", + "schema": { + "description": "Result for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorChanges" } } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "info_get_status", - "summary": "returns the current status of the node", + "name": "info_get_validator_changes_example", "params": [], "result": { - "name": "info_get_status_result", - "schema": { - "description": "Result for \"info_get_status\" RPC response.", - "type": "object", - "required": [ - "api_version", - "available_block_range", - "block_sync", - "build_version", - "chainspec_name", - "last_progress", - "peers", - "reactor_state", - "starting_state_root_hash", - "uptime" - ], - "properties": { - "peers": { - "description": "The node ID and network address of each connected peer.", - "$ref": "#/components/schemas/PeersMap" - }, - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "build_version": { - "description": "The compiled node version.", - "type": "string" - }, - "chainspec_name": { - "description": "The chainspec name.", - "type": "string" - }, - "starting_state_root_hash": { - "description": "The state root hash of the lowest block in the available block range.", - "$ref": "#/components/schemas/Digest" - }, - "last_added_block_info": { - "description": "The minimal info of the last block from the linear chain.", - "anyOf": [ - { - "$ref": "#/components/schemas/MinimalBlockInfo" - }, - { - "type": "null" - } - ] - }, - "our_public_signing_key": { - "description": "Our public signing key.", - "anyOf": [ - { - "$ref": "#/components/schemas/PublicKey" - }, - { - "type": "null" - } - ] - }, - "round_length": { - "description": "The next round length if this node is a validator.", - 
"anyOf": [ - { - "$ref": "#/components/schemas/TimeDiff" - }, - { - "type": "null" - } - ] - }, - "next_upgrade": { - "description": "Information about the next scheduled upgrade.", - "anyOf": [ - { - "$ref": "#/components/schemas/NextUpgrade" - }, + "name": "info_get_validator_changes_example_result", + "value": { + "api_version": "1.5.2", + "changes": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "status_changes": [ { - "type": "null" + "era_id": 1, + "validator_change": "Added" } ] - }, - "uptime": { - "description": "Time that passed since the node has started.", - "$ref": "#/components/schemas/TimeDiff" - }, - "reactor_state": { - "description": "The current state of node reactor.", - "$ref": "#/components/schemas/ReactorState" - }, - "last_progress": { - "description": "Timestamp of the last recorded progress in the reactor.", - "$ref": "#/components/schemas/Timestamp" - }, - "available_block_range": { - "description": "The available block range in storage.", - "$ref": "#/components/schemas/AvailableBlockRange" - }, - "block_sync": { - "description": "The status of the block synchronizer builders.", - "$ref": "#/components/schemas/BlockSynchronizerStatus" } - }, - "additionalProperties": false + ] } - }, - "examples": [ - { - "name": "info_get_status_example", - "params": [], - "result": { - "name": "info_get_status_example_result", - "value": { - "peers": [ - { - "node_id": "tls:0101..0101", - "address": "127.0.0.1:54321" - } - ], - "api_version": "1.5.2", - "build_version": "1.0.0-xxxxxxxxx@DEBUG", - "chainspec_name": "casper-example", - "starting_state_root_hash": "0000000000000000000000000000000000000000000000000000000000000000", - "last_added_block_info": { - "hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "timestamp": "2020-11-17T00:39:24.072Z", - "era_id": 1, - "height": 10, - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", - "creator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c" - }, - "our_public_signing_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "round_length": "1m 5s 536ms", - "next_upgrade": { - "activation_point": 42, - "protocol_version": "2.0.1" - }, - "uptime": "13s", - "reactor_state": "Initialize", - "last_progress": "1970-01-01T00:00:00.000Z", - "available_block_range": { - "low": 0, - "high": 0 - }, - "block_sync": { - "historical": { - "block_hash": "16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e", - "block_height": 40, - "acquisition_state": "have strict finality(40) for: block hash 16dd..c55e" - }, - "forward": { - "block_hash": "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983", - "block_height": 6701, - "acquisition_state": "have block body(6701) for: block hash 5990..4983" - } - } - } - } + } + } + ] + }, + { + "name": "info_get_chainspec", + "summary": "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files", + "params": [], + "result": { + "name": "info_get_chainspec_result", + "schema": { + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec file bytes.", + "$ref": "#/components/schemas/ChainspecRawBytes" } - ] - }, + } + } + }, + "examples": [ { - "name": 
"info_get_validator_changes", - "summary": "returns status changes of active validators", + "name": "info_get_chainspec_example", "params": [], "result": { - "name": "info_get_validator_changes_result", - "schema": { - "description": "Result for the \"info_get_validator_changes\" RPC.", - "type": "object", - "required": [ - "api_version", - "changes" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "changes": { - "description": "The validators' status changes.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonValidatorChanges" - } - } - }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "info_get_validator_changes_example", - "params": [], - "result": { - "name": "info_get_validator_changes_example_result", - "value": { - "api_version": "1.5.2", - "changes": [ - { - "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "status_changes": [ - { - "era_id": 1, - "validator_change": "Added" - } - ] - } - ] - } + "name": "info_get_chainspec_example_result", + "value": { + "api_version": "1.5.2", + "chainspec_bytes": { + "chainspec_bytes": "2a2a", + "maybe_genesis_accounts_bytes": null, + "maybe_global_state_bytes": null } } - ] - }, + } + } + ] + }, + { + "name": "chain_get_block", + "summary": "returns a Block from the network", + "params": [ { - "name": "info_get_chainspec", - "summary": "returns the raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files", - "params": [], - "result": { - "name": "info_get_chainspec_result", - "schema": { - "description": "Result for the \"info_get_chainspec\" RPC.", - "type": "object", - "required": [ - "api_version", - "chainspec_bytes" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_result", + "schema": { + "description": "Result for \"chain_get_block\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block": { + "description": "The block, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/JsonBlock" }, - "chainspec_bytes": { - "description": "The chainspec file bytes.", - "$ref": "#/components/schemas/ChainspecRawBytes" + { + "type": "null" } - } + ] } }, - "examples": [ - { - "name": "info_get_chainspec_example", - "params": [], - "result": { - "name": "info_get_chainspec_example_result", - "value": { - "api_version": "1.5.2", - "chainspec_bytes": { - "chainspec_bytes": "2a2a", - "maybe_genesis_accounts_bytes": null, - "maybe_global_state_bytes": null - } - } - } - } - ] - }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "chain_get_block", - "summary": "returns a Block from the network", + "name": "chain_get_block_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block identifier.", - "$ref": "#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "chain_get_block_result", - "schema": { - "description": "Result for \"chain_get_block\" RPC response.", - "type": "object", - "required": [ - "api_version" - ], - 
"properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "block": { - "description": "The block, if found.", - "anyOf": [ - { - "$ref": "#/components/schemas/JsonBlock" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "chain_get_block_example", - "params": [ - { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "chain_get_block_example_result", - "value": { - "api_version": "1.5.2", - "block": { - "hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "header": { - "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", - "body_hash": "cd502c5393a3c8b66d6979ad7857507c9baf5a8ba16ba99c28378d3a970fff42", - "random_bit": true, - "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", - "era_end": { - "era_report": { - "equivocators": [ - "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" - ], - "rewards": [ - { - "validator": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c", - "amount": 1000 - } - ], - "inactive_validators": [ - "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" - ] - }, - "next_era_validator_weights": [ - { - "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", - "weight": "456" - }, - { - "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", - "weight": "789" - }, - { - "validator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "weight": "123" - } - ] - }, - "timestamp": "2020-11-17T00:39:24.072Z", - "era_id": 1, - "height": 10, - "protocol_version": "1.0.0" - }, - "body": { - "proposer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "deploy_hashes": [], - "transfer_hashes": [ - "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + "name": "chain_get_block_example_result", + "value": { + "api_version": "1.5.2", + "block": { + "hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "header": { + "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "body_hash": "cd502c5393a3c8b66d6979ad7857507c9baf5a8ba16ba99c28378d3a970fff42", + "random_bit": true, + "accumulated_seed": "ac979f51525cfd979b14aa7dc0737c5154eabe0db9280eceaa8dc8d2905b20d5", + "era_end": { + "era_report": { + "equivocators": [ + "013b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da29" + ], + "rewards": [ + { + "validator": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c", + "amount": 1000 + } + ], + "inactive_validators": [ + "018139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394" ] }, - "proofs": [ + "next_era_validator_weights": [ + { + "validator": "016e7a1cdd29b0b78fd13af4c5598feff4ef2a97166e3ca6f2e4fbfccd80505bf1", + "weight": "456" + }, + { + "validator": "018a875fff1eb38451577acd5afee405456568dd7c89e090863a0557bc7af49f17", + "weight": "789" + }, { - "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", - "signature": 
"016291a7b2689e2edcc6e79030be50edd02f9bd7d809921ae2654012f808c7b9a0f125bc32d6aa610cbd012395a9832ccfaa9262023339f1db71ca073a13bb9707" + "validator": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "weight": "123" } ] + }, + "timestamp": "2020-11-17T00:39:24.072Z", + "era_id": 1, + "height": 10, + "protocol_version": "1.0.0" + }, + "body": { + "proposer": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "deploy_hashes": [], + "transfer_hashes": [ + "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" + ] + }, + "proofs": [ + { + "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", + "signature": "016291a7b2689e2edcc6e79030be50edd02f9bd7d809921ae2654012f808c7b9a0f125bc32d6aa610cbd012395a9832ccfaa9262023339f1db71ca073a13bb9707" } + ] + } + } + } + } + ] + }, + { + "name": "chain_get_block_transfers", + "summary": "returns all transfers for a Block from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_block_transfers_result", + "schema": { + "description": "Result for \"chain_get_block_transfers\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "block_hash": { + "description": "The block hash, if found.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockHash" + }, + { + "type": "null" } + ] + }, + "transfers": { + "description": "The block's transfers, if found.", + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/Transfer" } } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "chain_get_block_transfers", - "summary": "returns all transfers for a Block from the network", + "name": "chain_get_block_transfers_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block hash.", - "$ref": "#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "chain_get_block_transfers_result", - "schema": { - "description": "Result for \"chain_get_block_transfers\" RPC response.", - "type": "object", - "required": [ - "api_version" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "block_hash": { - "description": "The block hash, if found.", - "anyOf": [ - { - "$ref": "#/components/schemas/BlockHash" - }, - { - "type": "null" - } - ] - }, - "transfers": { - "description": "The block's transfers, if found.", - "type": [ - "array", - "null" - ], - "items": { - "$ref": "#/components/schemas/Transfer" - } + "name": "chain_get_block_transfers_example_result", + "value": { + "api_version": "1.5.2", + "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "transfers": [ + { + "deploy_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "from": "account-hash-0000000000000000000000000000000000000000000000000000000000000000", + "to": null, + "source": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", + "target": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", + "amount": "0", + "gas": "0", + "id": null } - }, - "additionalProperties": 
false + ] } + } + } + ] + }, + { + "name": "chain_get_state_root_hash", + "summary": "returns a state root hash at a given Block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block hash.", + "$ref": "#/components/schemas/BlockIdentifier" }, - "examples": [ - { - "name": "chain_get_block_transfers_example", - "params": [ + "required": false + } + ], + "result": { + "name": "chain_get_state_root_hash_result", + "schema": { + "description": "Result for \"chain_get_state_root_hash\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "state_root_hash": { + "description": "Hex-encoded hash of the state root.", + "anyOf": [ { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "chain_get_block_transfers_example_result", - "value": { - "api_version": "1.5.2", - "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "transfers": [ - { - "deploy_hash": "0000000000000000000000000000000000000000000000000000000000000000", - "from": "account-hash-0000000000000000000000000000000000000000000000000000000000000000", - "to": null, - "source": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", - "target": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", - "amount": "0", - "gas": "0", - "id": null - } - ] + "$ref": "#/components/schemas/Digest" + }, + { + "type": "null" } - } + ] } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "chain_get_state_root_hash", - "summary": "returns a state root hash at a given Block", + "name": "chain_get_state_root_hash_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block hash.", - "$ref": "#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Height": 10 + } } ], "result": { - "name": "chain_get_state_root_hash_result", - "schema": { - "description": "Result for \"chain_get_state_root_hash\" RPC response.", - "type": "object", - "required": [ - "api_version" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "state_root_hash": { - "description": "Hex-encoded hash of the state root.", - "anyOf": [ - { - "$ref": "#/components/schemas/Digest" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false + "name": "chain_get_state_root_hash_example_result", + "value": { + "api_version": "1.5.2", + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808" } + } + } + ] + }, + { + "name": "state_get_item", + "summary": "returns a stored value from the network. This RPC is deprecated, use `query_global_state` instead.", + "params": [ + { + "name": "state_root_hash", + "schema": { + "description": "Hash of the state root.", + "$ref": "#/components/schemas/Digest" }, - "examples": [ - { - "name": "chain_get_state_root_hash_example", - "params": [ - { - "name": "block_identifier", - "value": { - "Height": 10 - } - } - ], - "result": { - "name": "chain_get_state_root_hash_example_result", - "value": { - "api_version": "1.5.2", - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808" - } - } - } - ] + "required": true }, { - "name": "state_get_item", - "summary": "returns a stored value from the network. 
This RPC is deprecated, use `query_global_state` instead.", + "name": "key", + "schema": { + "description": "`casper_types::Key` as formatted string.", + "type": "string" + }, + "required": true + }, + { + "name": "path", + "schema": { + "description": "The path components starting from the key as base.", + "default": [], + "type": "array", + "items": { + "type": "string" + } + }, + "required": false + } + ], + "result": { + "name": "state_get_item_result", + "schema": { + "description": "Result for \"state_get_item\" RPC response.", + "type": "object", + "required": [ + "api_version", + "merkle_proof", + "stored_value" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "stored_value": { + "description": "The stored value.", + "$ref": "#/components/schemas/StoredValue" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_item_example", "params": [ { "name": "state_root_hash", - "schema": { - "description": "Hash of the state root.", - "$ref": "#/components/schemas/Digest" - }, - "required": true + "value": "0808080808080808080808080808080808080808080808080808080808080808" }, { "name": "key", - "schema": { - "description": "`casper_types::Key` as formatted string.", - "type": "string" - }, - "required": true + "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" }, { "name": "path", - "schema": { - "description": "The path components starting from the key as base.", - "default": [], - "type": "array", - "items": { - "type": "string" - } - }, - "required": false + "value": [ + "inner" + ] } ], "result": { - "name": "state_get_item_result", - "schema": { - "description": "Result for \"state_get_item\" RPC response.", - "type": "object", - "required": [ - "api_version", - "merkle_proof", - "stored_value" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "stored_value": { - "description": "The stored value.", - "$ref": "#/components/schemas/StoredValue" - }, - "merkle_proof": { - "description": "The Merkle proof.", - "type": "string" + "name": "state_get_item_example_result", + "value": { + "api_version": "1.5.2", + "stored_value": { + "CLValue": { + "cl_type": "U64", + "bytes": "0100000000000000", + "parsed": 1 } }, - "additionalProperties": false + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } + } + } + ] + }, + { + "name": "state_get_balance", + "summary": "returns a purse's balance from the network", + "params": [ + { + "name": 
"state_root_hash", + "schema": { + "description": "The hash of state root.", + "$ref": "#/components/schemas/Digest" }, - "examples": [ - { - "name": "state_get_item_example", - "params": [ - { - "name": "state_root_hash", - "value": "0808080808080808080808080808080808080808080808080808080808080808" - }, - { - "name": "key", - "value": "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1" - }, - { - "name": "path", - "value": [ - "inner" - ] - } - ], - "result": { - "name": "state_get_item_example_result", - "value": { - "api_version": "1.5.2", - "stored_value": { - "CLValue": { - "cl_type": "U64", - "bytes": "0100000000000000", - "parsed": 1 - } - }, - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" - } - } - } - ] + "required": true }, { - "name": "state_get_balance", - "summary": "returns a purse's balance from the network", + "name": "purse_uref", + "schema": { + "description": "Formatted URef.", + "type": "string" + }, + "required": true + } + ], + "result": { + "name": "state_get_balance_result", + "schema": { + "description": "Result for \"state_get_balance\" RPC response.", + "type": "object", + "required": [ + "api_version", + "balance_value", + "merkle_proof" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "balance_value": { + "description": "The balance value.", + "$ref": "#/components/schemas/U512" + }, + "merkle_proof": { + "description": "The Merkle proof.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_balance_example", "params": [ { "name": "state_root_hash", - "schema": { - "description": "The hash of state root.", - "$ref": "#/components/schemas/Digest" - }, - "required": true + "value": "0808080808080808080808080808080808080808080808080808080808080808" }, { "name": "purse_uref", - "schema": { - "description": "Formatted URef.", - "type": "string" - }, - "required": true + "value": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" } ], "result": { - "name": "state_get_balance_result", - "schema": { - "description": "Result for \"state_get_balance\" RPC response.", - "type": "object", - "required": [ - "api_version", - "balance_value", - "merkle_proof" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "balance_value": { - "description": "The balance value.", - "$ref": "#/components/schemas/U512" - }, - "merkle_proof": { - "description": "The Merkle proof.", - "type": "string" - } - }, - "additionalProperties": false + "name": 
"state_get_balance_example_result", + "value": { + "api_version": "1.5.2", + "balance_value": "123456", + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } + } + } + ] + }, + { + "name": "chain_get_era_info_by_switch_block", + "summary": "returns an EraInfo from the network", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" }, - "examples": [ - { - "name": "state_get_balance_example", - "params": [ + "required": false + } + ], + "result": { + "name": "chain_get_era_info_by_switch_block_result", + "schema": { + "description": "Result for \"chain_get_era_info\" RPC response.", + "type": "object", + "required": [ + "api_version" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + "anyOf": [ { - "name": "state_root_hash", - "value": "0808080808080808080808080808080808080808080808080808080808080808" + "$ref": "#/components/schemas/EraSummary" }, { - "name": "purse_uref", - "value": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" - } - ], - "result": { - "name": "state_get_balance_example_result", - "value": { - "api_version": "1.5.2", - "balance_value": "123456", - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + "type": "null" } - } + ] } - ] - }, + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "chain_get_era_info_by_switch_block", - "summary": "returns an EraInfo from the network", + "name": "chain_get_era_info_by_switch_block_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block identifier.", - "$ref": 
"#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "chain_get_era_info_by_switch_block_result", - "schema": { - "description": "Result for \"chain_get_era_info\" RPC response.", - "type": "object", - "required": [ - "api_version" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "era_summary": { - "description": "The era summary.", - "anyOf": [ - { - "$ref": "#/components/schemas/EraSummary" - }, - { - "type": "null" - } - ] - } - }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "chain_get_era_info_by_switch_block_example", - "params": [ - { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "chain_get_era_info_by_switch_block_example_result", - "value": { - "api_version": "1.5.2", - "era_summary": { - "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "era_id": 42, - "stored_value": { - "EraInfo": { - "seigniorage_allocations": [ - { - "Delegator": { - "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", - "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", - "amount": "1000" - } - }, - { - "Validator": { - "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", - "amount": "2000" - } - } - ] + "name": "chain_get_era_info_by_switch_block_example_result", + "value": { + "api_version": "1.5.2", + "era_summary": { + "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "Delegator": { + "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } } - }, - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + ] } - } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": 
"01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } } - ] - }, + } + } + ] + }, + { + "name": "state_get_auction_info", + "summary": "returns the bids and validators as of either a specific block (by height or hash), or the most recently added block", + "params": [ { - "name": "state_get_auction_info", - "summary": "returns the bids and validators as of either a specific block (by height or hash), or the most recently added block", + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "state_get_auction_info_result", + "schema": { + "description": "Result for \"state_get_auction_info\" RPC response.", + "type": "object", + "required": [ + "api_version", + "auction_state" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "auction_state": { + "description": "The auction state.", + "$ref": "#/components/schemas/AuctionState" + } + }, + "additionalProperties": false + } + }, + "examples": [ + { + "name": "state_get_auction_info_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block identifier.", - "$ref": "#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "state_get_auction_info_result", - "schema": { - "description": "Result for \"state_get_auction_info\" RPC response.", - "type": "object", - "required": [ - "api_version", - "auction_state" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "auction_state": { - "description": "The auction state.", - "$ref": "#/components/schemas/AuctionState" - } - }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "state_get_auction_info_example", - "params": [ - { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "state_get_auction_info_example_result", - "value": { - "api_version": "1.5.2", - "auction_state": { - "state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", - "block_height": 10, - "era_validators": [ - { - "era_id": 10, - "validator_weights": [ - { - "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", - "weight": "10" - } - ] - } - ], - "bids": [ + "name": "state_get_auction_info_example_result", + "value": { + "api_version": "1.5.2", + "auction_state": { + 
"state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", + "block_height": 10, + "era_validators": [ + { + "era_id": 10, + "validator_weights": [ { "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", - "bid": { - "bonding_purse": "uref-fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa-007", - "staked_amount": "10", - "delegation_rate": 0, - "delegators": [], - "inactive": false - } + "weight": "10" } ] } - } + ], + "bids": [ + { + "public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bid": { + "bonding_purse": "uref-fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa-007", + "staked_amount": "10", + "delegation_rate": 0, + "delegators": [], + "inactive": false + } + } + ] } } - ] - }, + } + } + ] + }, + { + "name": "chain_get_era_summary", + "summary": "returns the era summary at either a specific block (by height or hash), or the most recently added block", + "params": [ + { + "name": "block_identifier", + "schema": { + "description": "The block identifier.", + "$ref": "#/components/schemas/BlockIdentifier" + }, + "required": false + } + ], + "result": { + "name": "chain_get_era_summary_result", + "schema": { + "description": "Result for \"chain_get_era_summary\" RPC response.", + "type": "object", + "required": [ + "api_version", + "era_summary" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "era_summary": { + "description": "The era summary.", + "$ref": "#/components/schemas/EraSummary" + } + }, + "additionalProperties": false + } + }, + "examples": [ { - "name": "chain_get_era_summary", - "summary": "returns the era summary at either a specific block (by height or hash), or the most recently added block", + "name": "chain_get_era_summary_example", "params": [ { "name": "block_identifier", - "schema": { - "description": "The block identifier.", - "$ref": "#/components/schemas/BlockIdentifier" - }, - "required": false + "value": { + "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" + } } ], "result": { - "name": "chain_get_era_summary_result", - "schema": { - "description": "Result for \"chain_get_era_summary\" RPC response.", - "type": "object", - "required": [ - "api_version", - "era_summary" - ], - "properties": { - "api_version": { - "description": "The RPC API version.", - "type": "string" - }, - "era_summary": { - "description": "The era summary.", - "$ref": "#/components/schemas/EraSummary" - } - }, - "additionalProperties": false - } - }, - "examples": [ - { - "name": "chain_get_era_summary_example", - "params": [ - { - "name": "block_identifier", - "value": { - "Hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb" - } - } - ], - "result": { - "name": "chain_get_era_summary_example_result", - "value": { - "api_version": "1.5.2", - "era_summary": { - "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", - "era_id": 42, - "stored_value": { - "EraInfo": { - "seigniorage_allocations": [ - { - "Delegator": { - "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", - "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", - "amount": "1000" - } - }, - { - "Validator": { - "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", - "amount": "2000" - } - } - ] + "name": "chain_get_era_summary_example_result", + "value": { 
+ "api_version": "1.5.2", + "era_summary": { + "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", + "era_id": 42, + "stored_value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "Delegator": { + "delegator_public_key": "01e1b46a25baa8a5c28beb3c9cfb79b572effa04076f00befa57eb70b016153f18", + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "1000" + } + }, + { + "Validator": { + "validator_public_key": "012a1732addc639ea43a89e25d3ad912e40232156dcaa4b9edfc709f43d2fb0876", + "amount": "2000" + } } - }, - "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", - "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" + ] } - } + }, + "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808", + "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } } - ] + } } - ], - "components": { - "schemas": { - "Deploy": { - "description": "A deploy; an item containing a smart contract along with the requester's signature(s).", - "type": "object", - "required": [ - "approvals", - "hash", - "header", - "payment", - "session" - ], - "properties": { - "hash": { - "$ref": "#/components/schemas/DeployHash" - }, - "header": { - "$ref": "#/components/schemas/DeployHeader" - }, - "payment": { - "$ref": "#/components/schemas/ExecutableDeployItem" - }, - "session": { - "$ref": "#/components/schemas/ExecutableDeployItem" - }, - "approvals": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Approval" - }, - "uniqueItems": true - } - }, - "additionalProperties": false + ] + } + ], + "components": { + "schemas": { + "Deploy": { + "description": "A deploy; an item containing a smart 
contract along with the requester's signature(s).", + "type": "object", + "required": [ + "approvals", + "hash", + "header", + "payment", + "session" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/DeployHash" }, - "DeployHash": { - "description": "Hex-encoded deploy hash.", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] + "header": { + "$ref": "#/components/schemas/DeployHeader" }, - "Digest": { - "description": "Hex-encoded hash digest.", - "type": "string" + "payment": { + "$ref": "#/components/schemas/ExecutableDeployItem" }, - "DeployHeader": { - "description": "The header portion of a [`Deploy`].", - "type": "object", - "required": [ - "account", - "body_hash", - "chain_name", - "dependencies", - "gas_price", - "timestamp", - "ttl" - ], - "properties": { - "account": { - "$ref": "#/components/schemas/PublicKey" - }, - "timestamp": { - "$ref": "#/components/schemas/Timestamp" - }, - "ttl": { - "$ref": "#/components/schemas/TimeDiff" - }, - "gas_price": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "body_hash": { - "$ref": "#/components/schemas/Digest" - }, - "dependencies": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DeployHash" - } - }, - "chain_name": { - "type": "string" - } + "session": { + "$ref": "#/components/schemas/ExecutableDeployItem" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Approval" }, - "additionalProperties": false + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "DeployHash": { + "description": "Hex-encoded deploy hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "DeployHeader": { + "description": "The header portion of a [`Deploy`].", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + "dependencies", + "gas_price", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/components/schemas/PublicKey" }, - "PublicKey": { - "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", - "type": "string" + "timestamp": { + "$ref": "#/components/schemas/Timestamp" }, - "Timestamp": { - "description": "Timestamp formatted as per RFC 3339", - "type": "integer", - "format": "uint64", - "minimum": 0.0 + "ttl": { + "$ref": "#/components/schemas/TimeDiff" }, - "TimeDiff": { - "description": "Human-readable duration.", + "gas_price": { "type": "integer", "format": "uint64", "minimum": 0.0 }, - "ExecutableDeployItem": { - "description": "Represents possible variants of an executable deploy.", - "anyOf": [ - { - "description": "Executable specified as raw bytes that represent WASM code and an instance of [`RuntimeArgs`].", + "body_hash": { + "$ref": "#/components/schemas/Digest" + }, + "dependencies": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. 
Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "ExecutableDeployItem": { + "description": "Represents possible variants of an executable deploy.", + "anyOf": [ + { + "description": "Executable specified as raw bytes that represent WASM code and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "ModuleBytes" + ], + "properties": { + "ModuleBytes": { "type": "object", "required": [ - "ModuleBytes" + "args", + "module_bytes" ], "properties": { - "ModuleBytes": { - "type": "object", - "required": [ - "args", - "module_bytes" - ], - "properties": { - "module_bytes": { - "description": "Hex-encoded raw Wasm bytes.", - "type": "string" - }, - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "module_bytes": { + "description": "Hex-encoded raw Wasm bytes.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false - }, - { - "description": "Stored contract referenced by its [`ContractHash`], entry point and an instance of [`RuntimeArgs`].", + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by its [`ContractHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByHash" + ], + "properties": { + "StoredContractByHash": { "type": "object", "required": [ - "StoredContractByHash" + "args", + "entry_point", + "hash" ], "properties": { - "StoredContractByHash": { - "type": "object", - "required": [ - "args", - "entry_point", - "hash" - ], - "properties": { - "hash": { - "description": "Hex-encoded hash.", - "type": "string" - }, - "entry_point": { - "description": "Name of an entry point.", - "type": "string" - }, - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "hash": { + "description": "Hex-encoded hash.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false - }, - { - "description": "Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + 
"required": [ + "StoredContractByName" + ], + "properties": { + "StoredContractByName": { "type": "object", "required": [ - "StoredContractByName" + "args", + "entry_point", + "name" ], "properties": { - "StoredContractByName": { - "type": "object", - "required": [ - "args", - "entry_point", - "name" - ], - "properties": { - "name": { - "description": "Named key.", - "type": "string" - }, - "entry_point": { - "description": "Name of an entry point.", - "type": "string" - }, - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "name": { + "description": "Named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false - }, - { - "description": "Stored versioned contract referenced by its [`ContractPackageHash`], entry point and an instance of [`RuntimeArgs`].", + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by its [`ContractPackageHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByHash" + ], + "properties": { + "StoredVersionedContractByHash": { "type": "object", "required": [ - "StoredVersionedContractByHash" + "args", + "entry_point", + "hash" ], "properties": { - "StoredVersionedContractByHash": { - "type": "object", - "required": [ - "args", - "entry_point", - "hash" + "hash": { + "description": "Hex-encoded hash.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" ], - "properties": { - "hash": { - "description": "Hex-encoded hash.", - "type": "string" - }, - "version": { - "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", - "type": [ - "integer", - "null" - ], - "format": "uint32", - "minimum": 0.0 - }, - "entry_point": { - "description": "Entry point name.", - "type": "string" - }, - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false - }, - { - "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByName" + ], + "properties": { + "StoredVersionedContractByName": { "type": "object", "required": [ - "StoredVersionedContractByName" + "args", + "entry_point", + "name" ], "properties": { - "StoredVersionedContractByName": { - "type": "object", - "required": [ - "args", - "entry_point", - "name" + "name": { + "description": "Named key.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" ], - "properties": { - "name": { - "description": "Named key.", - "type": "string" - }, - "version": { - "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", - "type": [ - "integer", - "null" - ], - "format": "uint32", - "minimum": 0.0 - }, - "entry_point": { - "description": "Entry point name.", - "type": "string" - }, - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false - }, - { - "description": "A native transfer which does not contain or reference a WASM code.", + } + }, + "additionalProperties": false + }, + { + "description": "A native transfer which does not contain or reference a WASM code.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { "type": "object", "required": [ - "Transfer" + "args" ], "properties": { - "Transfer": { - "type": "object", - "required": [ - "args" - ], - "properties": { - "args": { - "description": "Runtime arguments.", - "allOf": [ - { - "$ref": "#/components/schemas/RuntimeArgs" - } - ] + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/components/schemas/RuntimeArgs" } - }, - "additionalProperties": false + ] } }, "additionalProperties": false } - ] + }, + "additionalProperties": false + } + ] + }, + "RuntimeArgs": { + "description": "Represents a collection of arguments passed to a smart contract.", + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedArg" + } + }, + "NamedArg": { + "description": "Named arguments to a contract.", + "type": "array", + "items": [ + { + "type": "string" }, - "RuntimeArgs": { - "description": "Represents a collection of arguments passed to a smart contract.", - "type": "array", - "items": { - "$ref": "#/components/schemas/NamedArg" - } + { + "$ref": "#/components/schemas/CLValue" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "CLValue": { + "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + "type": "object", + "required": [ + "bytes", + "cl_type" + ], + "properties": { + "cl_type": { + "$ref": "#/components/schemas/CLType" }, - "NamedArg": { - "description": "Named arguments to a contract.", - "type": "array", - "items": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/CLValue" - } + "bytes": { + "type": "string" + }, + "parsed": true + }, + "additionalProperties": false + }, + "CLType": { + "description": "Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", + "anyOf": [ + { + "type": "string", + "enum": [ + "Bool", + "I32", + "I64", + "U8", + "U32", + "U64", + "U128", + "U256", + "U512", + "Unit", + "String", + "Key", + "URef", + "PublicKey", + "Any" + ] + }, + { + "description": "`Option` of a `CLType`.", + "type": "object", + "required": [ + "Option" ], - "maxItems": 2, - "minItems": 2 + "properties": { + "Option": { + "$ref": "#/components/schemas/CLType" + } + }, + "additionalProperties": false }, - "CLValue": { - "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + { + "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", "type": "object", "required": [ - "bytes", - "cl_type" + "List" ], "properties": { - "cl_type": { + "List": { "$ref": "#/components/schemas/CLType" - }, - "bytes": { - "type": "string" - }, - "parsed": true + } }, "additionalProperties": false }, - "CLType": { - "description": "Casper types, i.e. types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", - "anyOf": [ - { - "type": "string", - "enum": [ - "Bool", - "I32", - "I64", - "U8", - "U32", - "U64", - "U128", - "U256", - "U512", - "Unit", - "String", - "Key", - "URef", - "PublicKey", - "Any" - ] - }, - { - "description": "`Option` of a `CLType`.", + { + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { "type": "object", "required": [ - "Option" + "err", + "ok" ], "properties": { - "Option": { + "ok": { + "$ref": "#/components/schemas/CLType" + }, + "err": { "$ref": "#/components/schemas/CLType" } }, "additionalProperties": false - }, - { - "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { "type": "object", "required": [ - "List" + "key", + "value" ], "properties": { - "List": { + "key": { + "$ref": "#/components/schemas/CLType" + }, + "value": { "$ref": "#/components/schemas/CLType" } }, "additionalProperties": false - }, - { - "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", - "type": "object", - "required": [ - "ByteArray" - ], - "properties": { - "ByteArray": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - } + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" 
}, - "additionalProperties": false - }, - { - "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", - "type": "object", - "required": [ - "Result" - ], - "properties": { - "Result": { - "type": "object", - "required": [ - "err", - "ok" - ], - "properties": { - "ok": { - "$ref": "#/components/schemas/CLType" - }, - "err": { - "$ref": "#/components/schemas/CLType" - } - }, - "additionalProperties": false - } + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" }, - "additionalProperties": false - }, - { - "description": "Map with keys of a single `CLType` and values of a single `CLType`.", - "type": "object", - "required": [ - "Map" - ], - "properties": { - "Map": { - "type": "object", - "required": [ - "key", - "value" - ], - "properties": { - "key": { - "$ref": "#/components/schemas/CLType" - }, - "value": { - "$ref": "#/components/schemas/CLType" - } - }, - "additionalProperties": false - } + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CLType" }, - "additionalProperties": false - }, + "maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + } + ] + }, + "Approval": { + "description": "A struct containing a signature of a deploy hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "JsonExecutionResult": { + "description": "The execution result of a single deploy.", + "type": "object", + "required": [ + "block_hash", + "result" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ { - "description": "1-ary tuple of a `CLType`.", - "type": "object", - "required": [ - "Tuple1" - ], - "properties": { - "Tuple1": { - "type": "array", - "items": { - "$ref": "#/components/schemas/CLType" - }, - "maxItems": 1, - "minItems": 1 - } - }, - "additionalProperties": false - }, + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "result": { + "description": "Execution result.", + "allOf": [ { - "description": "2-ary tuple of `CLType`s.", + "$ref": "#/components/schemas/ExecutionResult" + } + ] + } + }, + "additionalProperties": false + }, + "BlockHash": { + "description": "A cryptographic hash identifying a [`Block`](struct.Block.html).", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "ExecutionResult": { + "description": "The result of executing a single deploy.", + "anyOf": [ + { + "description": "The result of a failed execution.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { "type": "object", "required": [ - "Tuple2" + "cost", + "effect", + "error_message", + "transfers" ], "properties": { - "Tuple2": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + 
} + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the deploy.", "type": "array", "items": { - "$ref": "#/components/schemas/CLType" - }, - "maxItems": 2, - "minItems": 2 + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "error_message": { + "description": "The error message associated with executing the deploy.", + "type": "string" } }, "additionalProperties": false - }, - { - "description": "3-ary tuple of `CLType`s.", + } + }, + "additionalProperties": false + }, + { + "description": "The result of a successful execution.", + "type": "object", + "required": [ + "Success" + ], + "properties": { + "Success": { "type": "object", "required": [ - "Tuple3" + "cost", + "effect", + "transfers" ], "properties": { - "Tuple3": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the deploy.", "type": "array", "items": { - "$ref": "#/components/schemas/CLType" - }, - "maxItems": 3, - "minItems": 3 + "$ref": "#/components/schemas/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] } }, "additionalProperties": false } + }, + "additionalProperties": false + } + ] + }, + "ExecutionEffect": { + "description": "The journal of execution transforms from a single deploy.", + "type": "object", + "required": [ + "operations", + "transforms" + ], + "properties": { + "operations": { + "description": "The resulting operations.", + "type": "array", + "items": { + "$ref": "#/components/schemas/Operation" + } + }, + "transforms": { + "description": "The journal of execution transforms.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransformEntry" + } + } + }, + "additionalProperties": false + }, + "Operation": { + "description": "An operation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "kind": { + "description": "The type of operation.", + "allOf": [ + { + "$ref": "#/components/schemas/OpKind" + } + ] + } + }, + "additionalProperties": false + }, + "OpKind": { + "description": "The type of operation performed while executing a deploy.", + "type": "string", + "enum": [ + "Read", + "Write", + "Add", + "NoOp" + ] + }, + "TransformEntry": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "transform" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "transform": { + "description": "The transformation.", + "allOf": [ + { + "$ref": "#/components/schemas/Transform" + } + ] + } + }, + "additionalProperties": false + }, + "Transform": { + "description": "The actual transformation performed while executing a deploy.", + "anyOf": [ + { + "type": "string", + "enum": [ + "Identity", + "WriteContractWasm", + "WriteContract", + "WriteContractPackage" ] }, - "Approval": { - "description": "A struct containing a signature of a deploy hash and the public key of the signer.", + { + "description": "Writes the given CLValue to global state.", "type": "object", 
"required": [ - "signature", - "signer" + "WriteCLValue" ], "properties": { - "signer": { - "$ref": "#/components/schemas/PublicKey" - }, - "signature": { - "$ref": "#/components/schemas/Signature" + "WriteCLValue": { + "$ref": "#/components/schemas/CLValue" } }, "additionalProperties": false }, - "Signature": { - "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", - "type": "string" - }, - "JsonExecutionResult": { - "description": "The execution result of a single deploy.", + { + "description": "Writes the given Account to global state.", "type": "object", "required": [ - "block_hash", - "result" + "WriteAccount" ], "properties": { - "block_hash": { - "description": "The block hash.", - "allOf": [ - { - "$ref": "#/components/schemas/BlockHash" - } - ] - }, - "result": { - "description": "Execution result.", - "allOf": [ - { - "$ref": "#/components/schemas/ExecutionResult" - } - ] + "WriteAccount": { + "$ref": "#/components/schemas/AccountHash" } }, "additionalProperties": false }, - "BlockHash": { - "description": "A cryptographic hash identifying a [`Block`](struct.Block.html).", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" + { + "description": "Writes the given DeployInfo to global state.", + "type": "object", + "required": [ + "WriteDeployInfo" + ], + "properties": { + "WriteDeployInfo": { + "$ref": "#/components/schemas/DeployInfo" } - ] + }, + "additionalProperties": false }, - "ExecutionResult": { - "description": "The result of executing a single deploy.", - "anyOf": [ - { - "description": "The result of a failed execution.", - "type": "object", - "required": [ - "Failure" - ], - "properties": { - "Failure": { - "type": "object", - "required": [ - "cost", - "effect", - "error_message", - "transfers" - ], - "properties": { - "effect": { - "description": "The effect of executing the deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/ExecutionEffect" - } - ] - }, - "transfers": { - "description": "A record of Transfers performed while executing the deploy.", - "type": "array", - "items": { - "$ref": "#/components/schemas/TransferAddr" - } - }, - "cost": { - "description": "The cost of executing the deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "error_message": { - "description": "The error message associated with executing the deploy.", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - { - "description": "The result of a successful execution.", - "type": "object", - "required": [ - "Success" - ], - "properties": { - "Success": { - "type": "object", - "required": [ - "cost", - "effect", - "transfers" - ], - "properties": { - "effect": { - "description": "The effect of executing the deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/ExecutionEffect" - } - ] - }, - "transfers": { - "description": "A record of Transfers performed while executing the deploy.", - "type": "array", - "items": { - "$ref": "#/components/schemas/TransferAddr" - } - }, - "cost": { - "description": "The cost of executing the deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false + { + "description": "Writes the given EraInfo to global state.", + "type": "object", + "required": [ + "WriteEraInfo" + ], + "properties": { + "WriteEraInfo": { + "$ref": "#/components/schemas/EraInfo" } - ] + }, + "additionalProperties": false }, - "ExecutionEffect": { - 
"description": "The journal of execution transforms from a single deploy.", + { + "description": "Writes the given Transfer to global state.", "type": "object", "required": [ - "operations", - "transforms" + "WriteTransfer" ], "properties": { - "operations": { - "description": "The resulting operations.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Operation" - } - }, - "transforms": { - "description": "The journal of execution transforms.", - "type": "array", - "items": { - "$ref": "#/components/schemas/TransformEntry" - } + "WriteTransfer": { + "$ref": "#/components/schemas/Transfer" } }, "additionalProperties": false }, - "Operation": { - "description": "An operation performed while executing a deploy.", + { + "description": "Writes the given Bid to global state.", "type": "object", "required": [ - "key", - "kind" + "WriteBid" ], "properties": { - "key": { - "description": "The formatted string of the `Key`.", - "type": "string" - }, - "kind": { - "description": "The type of operation.", - "allOf": [ - { - "$ref": "#/components/schemas/OpKind" - } - ] + "WriteBid": { + "$ref": "#/components/schemas/Bid" } }, "additionalProperties": false }, - "OpKind": { - "description": "The type of operation performed while executing a deploy.", - "type": "string", - "enum": [ - "Read", - "Write", - "Add", - "NoOp" - ] + { + "description": "Writes the given Withdraw to global state.", + "type": "object", + "required": [ + "WriteWithdraw" + ], + "properties": { + "WriteWithdraw": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WithdrawPurse" + } + } + }, + "additionalProperties": false }, - "TransformEntry": { - "description": "A transformation performed while executing a deploy.", + { + "description": "Adds the given `i32`.", "type": "object", "required": [ - "key", - "transform" + "AddInt32" ], "properties": { - "key": { - "description": "The formatted string of the `Key`.", - "type": "string" - }, - "transform": { - "description": "The transformation.", - "allOf": [ - { - "$ref": "#/components/schemas/Transform" - } - ] + "AddInt32": { + "type": "integer", + "format": "int32" } }, "additionalProperties": false }, - "Transform": { - "description": "The actual transformation performed while executing a deploy.", - "anyOf": [ - { - "type": "string", - "enum": [ - "Identity", - "WriteContractWasm", - "WriteContract", - "WriteContractPackage" - ] - }, - { - "description": "Writes the given CLValue to global state.", - "type": "object", - "required": [ - "WriteCLValue" - ], - "properties": { - "WriteCLValue": { - "$ref": "#/components/schemas/CLValue" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given Account to global state.", - "type": "object", - "required": [ - "WriteAccount" - ], - "properties": { - "WriteAccount": { - "$ref": "#/components/schemas/AccountHash" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given DeployInfo to global state.", - "type": "object", - "required": [ - "WriteDeployInfo" - ], - "properties": { - "WriteDeployInfo": { - "$ref": "#/components/schemas/DeployInfo" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given EraInfo to global state.", - "type": "object", - "required": [ - "WriteEraInfo" - ], - "properties": { - "WriteEraInfo": { - "$ref": "#/components/schemas/EraInfo" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given Transfer to global state.", - "type": "object", - "required": [ - "WriteTransfer" - ], 
- "properties": { - "WriteTransfer": { - "$ref": "#/components/schemas/Transfer" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given Bid to global state.", - "type": "object", - "required": [ - "WriteBid" - ], - "properties": { - "WriteBid": { - "$ref": "#/components/schemas/Bid" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given Withdraw to global state.", - "type": "object", - "required": [ - "WriteWithdraw" - ], - "properties": { - "WriteWithdraw": { - "type": "array", - "items": { - "$ref": "#/components/schemas/WithdrawPurse" - } - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given `i32`.", - "type": "object", - "required": [ - "AddInt32" - ], - "properties": { - "AddInt32": { - "type": "integer", - "format": "int32" - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given `u64`.", - "type": "object", - "required": [ - "AddUInt64" - ], - "properties": { - "AddUInt64": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given `U128`.", - "type": "object", - "required": [ - "AddUInt128" - ], - "properties": { - "AddUInt128": { - "$ref": "#/components/schemas/U128" - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given `U256`.", - "type": "object", - "required": [ - "AddUInt256" - ], - "properties": { - "AddUInt256": { - "$ref": "#/components/schemas/U256" - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given `U512`.", - "type": "object", - "required": [ - "AddUInt512" - ], - "properties": { - "AddUInt512": { - "$ref": "#/components/schemas/U512" - } - }, - "additionalProperties": false - }, - { - "description": "Adds the given collection of named keys.", - "type": "object", - "required": [ - "AddKeys" - ], - "properties": { - "AddKeys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/NamedKey" - } - } - }, - "additionalProperties": false - }, - { - "description": "A failed transformation, containing an error message.", - "type": "object", - "required": [ - "Failure" - ], - "properties": { - "Failure": { - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Writes the given Unbonding to global state.", - "type": "object", - "required": [ - "WriteUnbonding" - ], - "properties": { - "WriteUnbonding": { - "type": "array", - "items": { - "$ref": "#/components/schemas/UnbondingPurse" - } - } - }, - "additionalProperties": false + { + "description": "Adds the given `u64`.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 } - ] - }, - "AccountHash": { - "description": "Hex-encoded account hash.", - "type": "string" + }, + "additionalProperties": false }, - "DeployInfo": { - "description": "Information relating to the given Deploy.", + { + "description": "Adds the given `U128`.", "type": "object", "required": [ - "deploy_hash", - "from", - "gas", - "source", - "transfers" + "AddUInt128" ], "properties": { - "deploy_hash": { - "description": "The relevant Deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/DeployHash" - } - ] - }, - "transfers": { - "description": "Transfers performed by the Deploy.", - "type": "array", - "items": { - "$ref": "#/components/schemas/TransferAddr" - } - }, - "from": { - "description": "Account identifier of the creator of the Deploy.", - "allOf": 
[ - { - "$ref": "#/components/schemas/AccountHash" - } - ] - }, - "source": { - "description": "Source purse used for payment of the Deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "gas": { - "description": "Gas cost of executing the Deploy.", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] + "AddUInt128": { + "$ref": "#/components/schemas/U128" } }, "additionalProperties": false }, - "TransferAddr": { - "description": "Hex-encoded transfer address.", - "type": "string" - }, - "URef": { - "description": "Hex-encoded, formatted URef.", - "type": "string" + { + "description": "Adds the given `U256`.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/components/schemas/U256" + } + }, + "additionalProperties": false }, - "U512": { - "description": "Decimal representation of a 512-bit integer.", - "type": "string" + { + "description": "Adds the given `U512`.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false }, - "EraInfo": { - "description": "Auction metadata. Intended to be recorded at each era.", + { + "description": "Adds the given collection of named keys.", "type": "object", "required": [ - "seigniorage_allocations" + "AddKeys" ], "properties": { - "seigniorage_allocations": { + "AddKeys": { "type": "array", "items": { - "$ref": "#/components/schemas/SeigniorageAllocation" + "$ref": "#/components/schemas/NamedKey" } } }, "additionalProperties": false }, - "SeigniorageAllocation": { - "description": "Information about a seigniorage allocation", - "anyOf": [ - { - "description": "Info about a seigniorage allocation for a validator", - "type": "object", - "required": [ - "Validator" - ], - "properties": { - "Validator": { - "type": "object", - "required": [ - "amount", - "validator_public_key" - ], - "properties": { - "validator_public_key": { - "description": "Validator's public key", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "amount": { - "description": "Allocated amount", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - { - "description": "Info about a seigniorage allocation for a delegator", - "type": "object", - "required": [ - "Delegator" - ], - "properties": { - "Delegator": { - "type": "object", - "required": [ - "amount", - "delegator_public_key", - "validator_public_key" - ], - "properties": { - "delegator_public_key": { - "description": "Delegator's public key", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "validator_public_key": { - "description": "Validator's public key", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "amount": { - "description": "Allocated amount", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false + { + "description": "A failed transformation, containing an error message.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "string" } - ] + }, + "additionalProperties": false }, - "Transfer": { - "description": "Represents a transfer from one purse to another", + { + "description": "Writes the given Unbonding to global state.", "type": "object", "required": [ - "amount", - "deploy_hash", - "from", - "gas", - 
"source", - "target" + "WriteUnbonding" ], "properties": { - "deploy_hash": { - "description": "Deploy that created the transfer", - "allOf": [ - { - "$ref": "#/components/schemas/DeployHash" - } - ] - }, - "from": { - "description": "Account from which transfer was executed", - "allOf": [ - { - "$ref": "#/components/schemas/AccountHash" - } - ] - }, - "to": { - "description": "Account to which funds are transferred", - "anyOf": [ - { - "$ref": "#/components/schemas/AccountHash" - }, - { - "type": "null" - } - ] - }, - "source": { - "description": "Source purse", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "target": { - "description": "Target purse", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "amount": { - "description": "Transfer amount", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "gas": { - "description": "Gas", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "id": { - "description": "User-defined id", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 0.0 + "WriteUnbonding": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UnbondingPurse" + } } }, "additionalProperties": false + } + ] + }, + "AccountHash": { + "description": "Hex-encoded account hash.", + "type": "string" + }, + "DeployInfo": { + "description": "Information relating to the given Deploy.", + "type": "object", + "required": [ + "deploy_hash", + "from", + "gas", + "source", + "transfers" + ], + "properties": { + "deploy_hash": { + "description": "The relevant Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] + }, + "transfers": { + "description": "Transfers performed by the Deploy.", + "type": "array", + "items": { + "$ref": "#/components/schemas/TransferAddr" + } + }, + "from": { + "description": "Account identifier of the creator of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "source": { + "description": "Source purse used for payment of the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] }, - "Bid": { - "description": "An entry in the validator map.", + "gas": { + "description": "Gas cost of executing the Deploy.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "TransferAddr": { + "description": "Hex-encoded transfer address.", + "type": "string" + }, + "URef": { + "description": "Hex-encoded, formatted URef.", + "type": "string" + }, + "U512": { + "description": "Decimal representation of a 512-bit integer.", + "type": "string" + }, + "EraInfo": { + "description": "Auction metadata. 
Intended to be recorded at each era.", + "type": "object", + "required": [ + "seigniorage_allocations" + ], + "properties": { + "seigniorage_allocations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeigniorageAllocation" + } + } + }, + "additionalProperties": false + }, + "SeigniorageAllocation": { + "description": "Information about a seigniorage allocation", + "anyOf": [ + { + "description": "Info about a seigniorage allocation for a validator", "type": "object", "required": [ - "bonding_purse", - "delegation_rate", - "delegators", - "inactive", - "staked_amount", - "validator_public_key" + "Validator" ], "properties": { - "validator_public_key": { - "description": "Validator public key", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "bonding_purse": { - "description": "The purse that was used for bonding.", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "staked_amount": { - "description": "The amount of tokens staked by a validator (not including delegators).", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "delegation_rate": { - "description": "Delegation rate", - "type": "integer", - "format": "uint8", - "minimum": 0.0 - }, - "vesting_schedule": { - "description": "Vesting schedule for a genesis validator. `None` if non-genesis validator.", - "anyOf": [ - { - "$ref": "#/components/schemas/VestingSchedule" + "Validator": { + "type": "object", + "required": [ + "amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] }, - { - "type": "null" + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] } - ] - }, - "delegators": { - "description": "This validator's delegators, indexed by their public keys", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Delegator" - } - }, - "inactive": { - "description": "`true` if validator has been \"evicted\"", - "type": "boolean" + }, + "additionalProperties": false } }, "additionalProperties": false }, - "VestingSchedule": { + { + "description": "Info about a seigniorage allocation for a delegator", "type": "object", "required": [ - "initial_release_timestamp_millis" + "Delegator" ], "properties": { - "initial_release_timestamp_millis": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "locked_amounts": { - "type": [ - "array", - "null" + "Delegator": { + "type": "object", + "required": [ + "amount", + "delegator_public_key", + "validator_public_key" ], - "items": { - "$ref": "#/components/schemas/U512" + "properties": { + "delegator_public_key": { + "description": "Delegator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } }, - "maxItems": 14, - "minItems": 14 + "additionalProperties": false } }, "additionalProperties": false + } + ] + }, + "Transfer": { + "description": "Represents a transfer from one purse to another", + "type": "object", + "required": [ + "amount", + "deploy_hash", + "from", + "gas", + "source", + "target" + ], + "properties": { + "deploy_hash": { + "description": "Deploy that created the 
transfer", + "allOf": [ + { + "$ref": "#/components/schemas/DeployHash" + } + ] }, - "Delegator": { - "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", - "type": "object", - "required": [ - "bonding_purse", - "delegator_public_key", - "staked_amount", - "validator_public_key" - ], - "properties": { - "delegator_public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "staked_amount": { - "$ref": "#/components/schemas/U512" + "from": { + "description": "Account from which transfer was executed", + "allOf": [ + { + "$ref": "#/components/schemas/AccountHash" + } + ] + }, + "to": { + "description": "Account to which funds are transferred", + "anyOf": [ + { + "$ref": "#/components/schemas/AccountHash" }, - "bonding_purse": { + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse", + "allOf": [ + { "$ref": "#/components/schemas/URef" - }, - "validator_public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "vesting_schedule": { - "anyOf": [ - { - "$ref": "#/components/schemas/VestingSchedule" - }, - { - "type": "null" - } - ] } - }, - "additionalProperties": false + ] }, - "WithdrawPurse": { - "description": "A withdraw purse, a legacy structure.", - "type": "object", - "required": [ - "amount", - "bonding_purse", - "era_of_creation", - "unbonder_public_key", - "validator_public_key" - ], - "properties": { - "bonding_purse": { - "description": "Bonding Purse", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "validator_public_key": { - "description": "Validators public key.", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "unbonder_public_key": { - "description": "Unbonders public key.", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "era_of_creation": { - "description": "Era in which this unbonding request was created.", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - "amount": { - "description": "Unbonding Amount.", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] + "target": { + "description": "Target purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" } - }, - "additionalProperties": false + ] }, - "EraId": { - "description": "Era ID newtype.", - "type": "integer", + "amount": { + "description": "Transfer amount", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "gas": { + "description": "Gas", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "id": { + "description": "User-defined id", + "type": [ + "integer", + "null" + ], "format": "uint64", "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Bid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] }, - "U128": { - "description": "Decimal representation of a 128-bit integer.", - "type": "string" + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] }, - "U256": { - "description": "Decimal representation of a 256-bit integer.", - "type": "string" + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": 
"#/components/schemas/U512" + } + ] }, - "NamedKey": { - "description": "A named key.", - "type": "object", - "required": [ - "key", - "name" - ], - "properties": { - "name": { - "description": "The name of the entry.", - "type": "string" + "delegation_rate": { + "description": "Delegation rate", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. `None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/components/schemas/VestingSchedule" }, - "key": { - "description": "The value of the entry: a casper `Key` type.", - "type": "string" + { + "type": "null" } - }, - "additionalProperties": false + ] }, - "UnbondingPurse": { - "description": "Unbonding purse.", + "delegators": { + "description": "This validator's delegators, indexed by their public keys", "type": "object", - "required": [ - "amount", - "bonding_purse", - "era_of_creation", - "unbonder_public_key", - "validator_public_key" - ], - "properties": { - "bonding_purse": { - "description": "Bonding Purse", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "validator_public_key": { - "description": "Validators public key.", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "unbonder_public_key": { - "description": "Unbonders public key.", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "era_of_creation": { - "description": "Era in which this unbonding request was created.", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - "amount": { - "description": "Unbonding Amount.", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "new_validator": { - "description": "The validator public key to re-delegate to.", - "anyOf": [ - { - "$ref": "#/components/schemas/PublicKey" - }, - { - "type": "null" - } - ] - } + "additionalProperties": { + "$ref": "#/components/schemas/Delegator" + } + }, + "inactive": { + "description": "`true` if validator has been \"evicted\"", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "VestingSchedule": { + "type": "object", + "required": [ + "initial_release_timestamp_millis" + ], + "properties": { + "initial_release_timestamp_millis": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "locked_amounts": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/components/schemas/U512" }, - "additionalProperties": false + "maxItems": 14, + "minItems": 14 + } + }, + "additionalProperties": false + }, + "Delegator": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_public_key", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "$ref": "#/components/schemas/PublicKey" }, - "BlockIdentifier": { - "description": "Identifier for possible ways to retrieve a block.", + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + "validator_public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "vesting_schedule": { "anyOf": [ { - "description": "Identify and retrieve the block with its hash.", - "type": "object", - "required": [ - "Hash" - ], - "properties": { - "Hash": { - "$ref": "#/components/schemas/BlockHash" - } - }, - "additionalProperties": false + "$ref": "#/components/schemas/VestingSchedule" }, { - "description": "Identify 
and retrieve the block with its height.", - "type": "object", - "required": [ - "Height" - ], - "properties": { - "Height": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 - } - }, - "additionalProperties": false + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "WithdrawPurse": { + "description": "A withdraw purse, a legacy structure.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "U128": { + "description": "Decimal representation of a 128-bit integer.", + "type": "string" + }, + "U256": { + "description": "Decimal representation of a 256-bit integer.", + "type": "string" + }, + "NamedKey": { + "description": "A named key.", + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "name": { + "description": "The name of the entry.", + "type": "string" + }, + "key": { + "description": "The value of the entry: a casper `Key` type.", + "type": "string" + } + }, + "additionalProperties": false + }, + "UnbondingPurse": { + "description": "Unbonding purse.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/components/schemas/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" } ] }, - "Account": { - "description": "Structure representing a user's account, stored in global state.", + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/components/schemas/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockIdentifier": { + "description": "Identifier for possible ways to retrieve a block.", + "anyOf": [ + { + "description": "Identify and retrieve the block with its hash.", "type": "object", "required": [ - "account_hash", - "action_thresholds", - "associated_keys", - "main_purse", - "named_keys" + "Hash" ], "properties": { - "account_hash": { - "$ref": "#/components/schemas/AccountHash" 
- }, - "named_keys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/NamedKey" - } - }, - "main_purse": { - "$ref": "#/components/schemas/URef" - }, - "associated_keys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AssociatedKey" - } - }, - "action_thresholds": { - "$ref": "#/components/schemas/ActionThresholds" + "Hash": { + "$ref": "#/components/schemas/BlockHash" } }, "additionalProperties": false }, - "AssociatedKey": { + { + "description": "Identify and retrieve the block with its height.", "type": "object", "required": [ - "account_hash", - "weight" + "Height" ], "properties": { - "account_hash": { - "$ref": "#/components/schemas/AccountHash" - }, - "weight": { + "Height": { "type": "integer", - "format": "uint8", + "format": "uint64", "minimum": 0.0 } }, "additionalProperties": false + } + ] + }, + "Account": { + "description": "Structure representing a user's account, stored in global state.", + "type": "object", + "required": [ + "account_hash", + "action_thresholds", + "associated_keys", + "main_purse", + "named_keys" + ], + "properties": { + "account_hash": { + "$ref": "#/components/schemas/AccountHash" + }, + "named_keys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + }, + "main_purse": { + "$ref": "#/components/schemas/URef" + }, + "associated_keys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AssociatedKey" + } + }, + "action_thresholds": { + "$ref": "#/components/schemas/ActionThresholds" + } + }, + "additionalProperties": false + }, + "AssociatedKey": { + "type": "object", + "required": [ + "account_hash", + "weight" + ], + "properties": { + "account_hash": { + "$ref": "#/components/schemas/AccountHash" + }, + "weight": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "ActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management" + ], + "properties": { + "deployment": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 }, - "ActionThresholds": { - "description": "Thresholds that have to be met when executing an action of a certain type.", + "key_management": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "DictionaryIdentifier": { + "description": "Options for dictionary item lookups.", + "anyOf": [ + { + "description": "Lookup a dictionary item via an Account's named keys.", "type": "object", "required": [ - "deployment", - "key_management" + "AccountNamedKey" ], "properties": { - "deployment": { - "type": "integer", - "format": "uint8", - "minimum": 0.0 - }, - "key_management": { - "type": "integer", - "format": "uint8", - "minimum": 0.0 + "AccountNamedKey": { + "type": "object", + "required": [ + "dictionary_item_key", + "dictionary_name", + "key" + ], + "properties": { + "key": { + "description": "The account key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" + } + } } }, "additionalProperties": false }, - "DictionaryIdentifier": { - "description": "Options for dictionary item lookups.", - "anyOf": [ - { - "description": "Lookup a dictionary 
item via an Account's named keys.", + { + "description": "Lookup a dictionary item via a Contract's named keys.", + "type": "object", + "required": [ + "ContractNamedKey" + ], + "properties": { + "ContractNamedKey": { "type": "object", "required": [ - "AccountNamedKey" + "dictionary_item_key", + "dictionary_name", + "key" ], "properties": { - "AccountNamedKey": { - "type": "object", - "required": [ - "dictionary_item_key", - "dictionary_name", - "key" - ], - "properties": { - "key": { - "description": "The account key as a formatted string whose named keys contains dictionary_name.", - "type": "string" - }, - "dictionary_name": { - "description": "The named key under which the dictionary seed URef is stored.", - "type": "string" - }, - "dictionary_item_key": { - "description": "The dictionary item key formatted as a string.", - "type": "string" - } - } + "key": { + "description": "The contract key as a formatted string whose named keys contains dictionary_name.", + "type": "string" + }, + "dictionary_name": { + "description": "The named key under which the dictionary seed URef is stored.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" } - }, - "additionalProperties": false - }, - { - "description": "Lookup a dictionary item via a Contract's named keys.", + } + } + }, + "additionalProperties": false + }, + { + "description": "Lookup a dictionary item via its seed URef.", + "type": "object", + "required": [ + "URef" + ], + "properties": { + "URef": { "type": "object", "required": [ - "ContractNamedKey" + "dictionary_item_key", + "seed_uref" ], "properties": { - "ContractNamedKey": { - "type": "object", - "required": [ - "dictionary_item_key", - "dictionary_name", - "key" - ], - "properties": { - "key": { - "description": "The contract key as a formatted string whose named keys contains dictionary_name.", - "type": "string" - }, - "dictionary_name": { - "description": "The named key under which the dictionary seed URef is stored.", - "type": "string" - }, - "dictionary_item_key": { - "description": "The dictionary item key formatted as a string.", - "type": "string" - } - } + "seed_uref": { + "description": "The dictionary's seed URef.", + "type": "string" + }, + "dictionary_item_key": { + "description": "The dictionary item key formatted as a string.", + "type": "string" } - }, - "additionalProperties": false - }, - { - "description": "Lookup a dictionary item via its seed URef.", - "type": "object", - "required": [ - "URef" - ], - "properties": { - "URef": { - "type": "object", - "required": [ - "dictionary_item_key", - "seed_uref" - ], - "properties": { - "seed_uref": { - "description": "The dictionary's seed URef.", - "type": "string" - }, - "dictionary_item_key": { - "description": "The dictionary item key formatted as a string.", - "type": "string" - } - } - } - }, - "additionalProperties": false - }, - { - "description": "Lookup a dictionary item via its unique key.", - "type": "object", - "required": [ - "Dictionary" - ], - "properties": { - "Dictionary": { - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "StoredValue": { - "description": "Representation of a value stored in global state.\n\n`Account`, `Contract` and `ContractPackage` have their own `json_compatibility` representations (see their docs for further info).", - "anyOf": [ - { - "description": "A CasperLabs value.", - "type": "object", - "required": [ - "CLValue" - ], - "properties": { - "CLValue": { - 
"$ref": "#/components/schemas/CLValue" - } - }, - "additionalProperties": false - }, - { - "description": "An account.", - "type": "object", - "required": [ - "Account" - ], - "properties": { - "Account": { - "$ref": "#/components/schemas/Account" - } - }, - "additionalProperties": false - }, - { - "description": "A contract's Wasm", - "type": "object", - "required": [ - "ContractWasm" - ], - "properties": { - "ContractWasm": { - "type": "string" - } - }, - "additionalProperties": false - }, - { - "description": "Methods and type signatures supported by a contract.", - "type": "object", - "required": [ - "Contract" - ], - "properties": { - "Contract": { - "$ref": "#/components/schemas/Contract" - } - }, - "additionalProperties": false - }, - { - "description": "A contract definition, metadata, and security container.", - "type": "object", - "required": [ - "ContractPackage" - ], - "properties": { - "ContractPackage": { - "$ref": "#/components/schemas/ContractPackage" - } - }, - "additionalProperties": false - }, - { - "description": "A record of a transfer", - "type": "object", - "required": [ - "Transfer" - ], - "properties": { - "Transfer": { - "$ref": "#/components/schemas/Transfer" - } - }, - "additionalProperties": false - }, - { - "description": "A record of a deploy", - "type": "object", - "required": [ - "DeployInfo" - ], - "properties": { - "DeployInfo": { - "$ref": "#/components/schemas/DeployInfo" - } - }, - "additionalProperties": false - }, - { - "description": "Auction metadata", - "type": "object", - "required": [ - "EraInfo" - ], - "properties": { - "EraInfo": { - "$ref": "#/components/schemas/EraInfo" - } - }, - "additionalProperties": false - }, - { - "description": "A bid", - "type": "object", - "required": [ - "Bid" - ], - "properties": { - "Bid": { - "$ref": "#/components/schemas/Bid" - } - }, - "additionalProperties": false - }, - { - "description": "A withdraw", - "type": "object", - "required": [ - "Withdraw" - ], - "properties": { - "Withdraw": { - "type": "array", - "items": { - "$ref": "#/components/schemas/WithdrawPurse" - } - } - }, - "additionalProperties": false - }, - { - "description": "A collection of unbonding purses", - "type": "object", - "required": [ - "Unbonding" - ], - "properties": { - "Unbonding": { - "type": "array", - "items": { - "$ref": "#/components/schemas/UnbondingPurse" - } - } - }, - "additionalProperties": false + } } - ] + }, + "additionalProperties": false }, - "Contract": { - "description": "A contract struct that can be serialized as JSON object.", + { + "description": "Lookup a dictionary item via its unique key.", "type": "object", "required": [ - "contract_package_hash", - "contract_wasm_hash", - "entry_points", - "named_keys", - "protocol_version" + "Dictionary" ], "properties": { - "contract_package_hash": { - "$ref": "#/components/schemas/ContractPackageHash" - }, - "contract_wasm_hash": { - "$ref": "#/components/schemas/ContractWasmHash" - }, - "named_keys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/NamedKey" - } - }, - "entry_points": { - "type": "array", - "items": { - "$ref": "#/components/schemas/EntryPoint" - } - }, - "protocol_version": { + "Dictionary": { "type": "string" } }, "additionalProperties": false - }, - "ContractPackageHash": { - "description": "The hash address of the contract package", - "type": "string" - }, - "ContractWasmHash": { - "description": "The hash address of the contract wasm", - "type": "string" - }, - "EntryPoint": { - "description": "Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.", + } + ] + }, + "StoredValue": { + "description": "Representation of a value stored in global state.\n\n`Account`, `Contract` and `ContractPackage` have their own `json_compatibility` representations (see their docs for further info).", + "anyOf": [ + { + "description": "A CasperLabs value.", "type": "object", "required": [ - "access", - "args", - "entry_point_type", - "name", - "ret" + "CLValue" ], "properties": { - "name": { - "type": "string" - }, - "args": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Parameter" - } - }, - "ret": { - "$ref": "#/components/schemas/CLType" - }, - "access": { - "$ref": "#/components/schemas/EntryPointAccess" - }, - "entry_point_type": { - "$ref": "#/components/schemas/EntryPointType" + "CLValue": { + "$ref": "#/components/schemas/CLValue" } - } + }, + "additionalProperties": false }, - "Parameter": { - "description": "Parameter to a method", + { + "description": "An account.", "type": "object", "required": [ - "cl_type", - "name" + "Account" ], "properties": { - "name": { - "type": "string" - }, - "cl_type": { - "$ref": "#/components/schemas/CLType" + "Account": { + "$ref": "#/components/schemas/Account" } - } + }, + "additionalProperties": false }, - "EntryPointAccess": { - "description": "Enum describing the possible access control options for a contract entry point (method).", - "anyOf": [ - { - "type": "string", - "enum": [ - "Public" - ] - }, - { - "description": "Only users from the listed groups may call this method. Note: if the list is empty then this method is not callable from outside the contract.", - "type": "object", - "required": [ - "Groups" - ], - "properties": { - "Groups": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Group" - } - } - }, - "additionalProperties": false + { + "description": "A contract's Wasm", + "type": "object", + "required": [ + "ContractWasm" + ], + "properties": { + "ContractWasm": { + "type": "string" } - ] - }, - "Group": { - "description": "A (labelled) \"user group\". 
Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", - "type": "string" + }, + "additionalProperties": false }, - "EntryPointType": { - "description": "Context of method execution", - "type": "string", - "enum": [ - "Session", + { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ "Contract" - ] + ], + "properties": { + "Contract": { + "$ref": "#/components/schemas/Contract" + } + }, + "additionalProperties": false }, - "ContractPackage": { - "description": "Contract definition, metadata, and security container.", + { + "description": "A contract definition, metadata, and security container.", "type": "object", "required": [ - "access_key", - "disabled_versions", - "groups", - "lock_status", - "versions" + "ContractPackage" ], "properties": { - "access_key": { - "$ref": "#/components/schemas/URef" - }, - "versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ContractVersion" - } - }, - "disabled_versions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DisabledVersion" - } - }, - "groups": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Groups" - } - }, - "lock_status": { - "$ref": "#/components/schemas/ContractPackageStatus" + "ContractPackage": { + "$ref": "#/components/schemas/ContractPackage" } }, "additionalProperties": false }, - "ContractVersion": { + { + "description": "A record of a transfer", "type": "object", "required": [ - "contract_hash", - "contract_version", - "protocol_version_major" + "Transfer" ], "properties": { - "protocol_version_major": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "contract_version": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "contract_hash": { - "$ref": "#/components/schemas/ContractHash" + "Transfer": { + "$ref": "#/components/schemas/Transfer" } - } - }, - "ContractHash": { - "description": "The hash address of the contract", - "type": "string" + }, + "additionalProperties": false }, - "DisabledVersion": { + { + "description": "A record of a deploy", "type": "object", "required": [ - "contract_version", - "protocol_version_major" + "DeployInfo" ], "properties": { - "protocol_version_major": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "contract_version": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 + "DeployInfo": { + "$ref": "#/components/schemas/DeployInfo" } - } + }, + "additionalProperties": false }, - "Groups": { + { + "description": "Auction metadata", "type": "object", "required": [ - "group", - "keys" + "EraInfo" ], "properties": { - "group": { - "type": "string" - }, - "keys": { - "type": "array", - "items": { - "$ref": "#/components/schemas/URef" - } + "EraInfo": { + "$ref": "#/components/schemas/EraInfo" } - } - }, - "ContractPackageStatus": { - "description": "A enum to determine the lock status of the contract package.", - "type": "string", - "enum": [ - "Locked", - "Unlocked" - ] - }, - "GlobalStateIdentifier": { - "description": "Identifier for possible ways to query Global State", - "anyOf": [ - { - "description": "Query using a block hash.", - "type": "object", - "required": [ - "BlockHash" - ], - "properties": { - "BlockHash": { - "$ref": "#/components/schemas/BlockHash" - } - }, - "additionalProperties": false - }, - { - "description": "Query using a block height.", - "type": "object", - "required": [ - "BlockHeight" - ], - "properties": { - "BlockHeight": { - 
"type": "integer", - "format": "uint64", - "minimum": 0.0 - } - }, - "additionalProperties": false - }, - { - "description": "Query using the state root hash.", - "type": "object", - "required": [ - "StateRootHash" - ], - "properties": { - "StateRootHash": { - "$ref": "#/components/schemas/Digest" - } - }, - "additionalProperties": false - } - ] + }, + "additionalProperties": false }, - "JsonBlockHeader": { - "description": "JSON representation of a block header.", + { + "description": "A bid", "type": "object", "required": [ - "accumulated_seed", - "body_hash", - "era_id", - "height", - "parent_hash", - "protocol_version", - "random_bit", - "state_root_hash", - "timestamp" + "Bid" ], "properties": { - "parent_hash": { - "description": "The parent hash.", - "allOf": [ - { - "$ref": "#/components/schemas/BlockHash" - } - ] - }, - "state_root_hash": { - "description": "The state root hash.", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] - }, - "body_hash": { - "description": "The body hash.", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] - }, - "random_bit": { - "description": "Randomness bit.", - "type": "boolean" - }, - "accumulated_seed": { - "description": "Accumulated seed.", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] - }, - "era_end": { - "description": "The era end.", - "anyOf": [ - { - "$ref": "#/components/schemas/JsonEraEnd" - }, - { - "type": "null" - } - ] - }, - "timestamp": { - "description": "The block timestamp.", - "allOf": [ - { - "$ref": "#/components/schemas/Timestamp" - } - ] - }, - "era_id": { - "description": "The block era id.", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - "height": { - "description": "The block height.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "protocol_version": { - "description": "The protocol version.", - "allOf": [ - { - "$ref": "#/components/schemas/ProtocolVersion" - } - ] + "Bid": { + "$ref": "#/components/schemas/Bid" } }, "additionalProperties": false }, - "JsonEraEnd": { + { + "description": "A withdraw", "type": "object", "required": [ - "era_report", - "next_era_validator_weights" + "Withdraw" ], "properties": { - "era_report": { - "$ref": "#/components/schemas/JsonEraReport" - }, - "next_era_validator_weights": { + "Withdraw": { "type": "array", "items": { - "$ref": "#/components/schemas/ValidatorWeight" + "$ref": "#/components/schemas/WithdrawPurse" } } }, "additionalProperties": false }, - "JsonEraReport": { - "description": "Equivocation and reward information to be included in the terminal block.", + { + "description": "A collection of unbonding purses", "type": "object", "required": [ - "equivocators", - "inactive_validators", - "rewards" + "Unbonding" ], "properties": { - "equivocators": { - "type": "array", - "items": { - "$ref": "#/components/schemas/PublicKey" - } - }, - "rewards": { + "Unbonding": { "type": "array", "items": { - "$ref": "#/components/schemas/Reward" - } - }, - "inactive_validators": { - "type": "array", - "items": { - "$ref": "#/components/schemas/PublicKey" + "$ref": "#/components/schemas/UnbondingPurse" } } }, "additionalProperties": false + } + ] + }, + "Contract": { + "description": "A contract struct that can be serialized as JSON object.", + "type": "object", + "required": [ + "contract_package_hash", + "contract_wasm_hash", + "entry_points", + "named_keys", + "protocol_version" + ], + "properties": { + "contract_package_hash": { + "$ref": "#/components/schemas/ContractPackageHash" }, - 
"Reward": { - "type": "object", - "required": [ - "amount", - "validator" - ], - "properties": { - "validator": { - "$ref": "#/components/schemas/PublicKey" - }, - "amount": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 - } - }, - "additionalProperties": false + "contract_wasm_hash": { + "$ref": "#/components/schemas/ContractWasmHash" + }, + "named_keys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NamedKey" + } + }, + "entry_points": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EntryPoint" + } + }, + "protocol_version": { + "type": "string" + } + }, + "additionalProperties": false + }, + "ContractPackageHash": { + "description": "The hash address of the contract package", + "type": "string" + }, + "ContractWasmHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntryPoint": { + "description": "Type signature of a method. Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Parameter" + } + }, + "ret": { + "$ref": "#/components/schemas/CLType" + }, + "access": { + "$ref": "#/components/schemas/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/components/schemas/EntryPointType" + } + } + }, + "Parameter": { + "description": "Parameter to a method", + "type": "object", + "required": [ + "cl_type", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "cl_type": { + "$ref": "#/components/schemas/CLType" + } + } + }, + "EntryPointAccess": { + "description": "Enum describing the possible access control options for a contract entry point (method).", + "anyOf": [ + { + "type": "string", + "enum": [ + "Public" + ] }, - "ValidatorWeight": { + { + "description": "Only users from the listed groups may call this method. Note: if the list is empty then this method is not callable from outside the contract.", "type": "object", "required": [ - "validator", - "weight" + "Groups" ], "properties": { - "validator": { - "$ref": "#/components/schemas/PublicKey" - }, - "weight": { - "$ref": "#/components/schemas/U512" + "Groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Group" + } } }, "additionalProperties": false + } + ] + }, + "Group": { + "description": "A (labelled) \"user group\". 
Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", + "type": "string" + }, + "EntryPointType": { + "description": "Context of method execution", + "type": "string", + "enum": [ + "Session", + "Contract" + ] + }, + "ContractPackage": { + "description": "Contract definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "access_key": { + "$ref": "#/components/schemas/URef" }, - "ProtocolVersion": { - "description": "Casper Platform protocol version", - "type": "string" + "versions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ContractVersion" + } }, - "PurseIdentifier": { - "description": "Identifier of a purse.", - "anyOf": [ - { - "description": "The main purse of the account identified by this public key.", - "type": "object", - "required": [ - "main_purse_under_public_key" - ], - "properties": { - "main_purse_under_public_key": { - "$ref": "#/components/schemas/PublicKey" - } - }, - "additionalProperties": false - }, - { - "description": "The main purse of the account identified by this account hash.", - "type": "object", - "required": [ - "main_purse_under_account_hash" - ], - "properties": { - "main_purse_under_account_hash": { - "$ref": "#/components/schemas/AccountHash" - } - }, - "additionalProperties": false - }, - { - "description": "The purse identified by this URef.", - "type": "object", - "required": [ - "purse_uref" - ], - "properties": { - "purse_uref": { - "$ref": "#/components/schemas/URef" - } - }, - "additionalProperties": false - } - ] + "disabled_versions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DisabledVersion" + } }, - "PeersMap": { - "description": "Map of peer IDs to network addresses.", + "groups": { "type": "array", "items": { - "$ref": "#/components/schemas/PeerEntry" + "$ref": "#/components/schemas/Groups" } }, - "PeerEntry": { - "description": "Node peer entry.", + "lock_status": { + "$ref": "#/components/schemas/ContractPackageStatus" + } + }, + "additionalProperties": false + }, + "ContractVersion": { + "type": "object", + "required": [ + "contract_hash", + "contract_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_version": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_hash": { + "$ref": "#/components/schemas/ContractHash" + } + } + }, + "ContractHash": { + "description": "The hash address of the contract", + "type": "string" + }, + "DisabledVersion": { + "type": "object", + "required": [ + "contract_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_version": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + } + }, + "Groups": { + "type": "object", + "required": [ + "group", + "keys" + ], + "properties": { + "group": { + "type": "string" + }, + "keys": { + "type": "array", + "items": { + "$ref": "#/components/schemas/URef" + } + } + } + }, + "ContractPackageStatus": { + "description": "A enum to determine the lock status of the contract package.", + "type": "string", + "enum": [ + "Locked", + "Unlocked" + ] + }, + "GlobalStateIdentifier": { + "description": "Identifier for possible ways to query Global State", + "anyOf": [ + { + 
"description": "Query using a block hash.", "type": "object", "required": [ - "address", - "node_id" + "BlockHash" ], "properties": { - "node_id": { - "description": "Node id.", - "type": "string" - }, - "address": { - "description": "Node address.", - "type": "string" + "BlockHash": { + "$ref": "#/components/schemas/BlockHash" } }, "additionalProperties": false }, - "MinimalBlockInfo": { - "description": "Minimal info of a `Block`.", + { + "description": "Query using a block height.", "type": "object", "required": [ - "creator", - "era_id", - "hash", - "height", - "state_root_hash", - "timestamp" + "BlockHeight" ], "properties": { - "hash": { - "$ref": "#/components/schemas/BlockHash" - }, - "timestamp": { - "$ref": "#/components/schemas/Timestamp" - }, - "era_id": { - "$ref": "#/components/schemas/EraId" - }, - "height": { + "BlockHeight": { "type": "integer", "format": "uint64", "minimum": 0.0 - }, - "state_root_hash": { - "$ref": "#/components/schemas/Digest" - }, - "creator": { - "$ref": "#/components/schemas/PublicKey" } }, "additionalProperties": false }, - "NextUpgrade": { - "description": "Information about the next protocol upgrade.", + { + "description": "Query using the state root hash.", "type": "object", "required": [ - "activation_point", - "protocol_version" + "StateRootHash" ], "properties": { - "activation_point": { - "$ref": "#/components/schemas/ActivationPoint" - }, - "protocol_version": { - "type": "string" + "StateRootHash": { + "$ref": "#/components/schemas/Digest" } - } + }, + "additionalProperties": false + } + ] + }, + "JsonBlockHeader": { + "description": "JSON representation of a block header.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "era_id", + "height", + "parent_hash", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] }, - "ActivationPoint": { - "description": "The first era to which the associated protocol version applies.", - "anyOf": [ + "state_root_hash": { + "description": "The state root hash.", + "allOf": [ { - "description": "Era id.", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - { - "description": "Genesis timestamp.", - "allOf": [ - { - "$ref": "#/components/schemas/Timestamp" - } - ] + "$ref": "#/components/schemas/Digest" } ] }, - "ReactorState": { - "description": "The state of the reactor.", - "type": "string", - "enum": [ - "Initialize", - "CatchUp", - "Upgrading", - "KeepUp", - "Validate", - "ShutdownForUpgrade" + "body_hash": { + "description": "The body hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } ] }, - "AvailableBlockRange": { - "description": "An unbroken, inclusive range of blocks.", - "type": "object", - "required": [ - "high", - "low" - ], - "properties": { - "low": { - "description": "The inclusive lower bound of the range.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "high": { - "description": "The inclusive upper bound of the range.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - } - }, - "additionalProperties": false + "random_bit": { + "description": "Randomness bit.", + "type": "boolean" }, - "BlockSynchronizerStatus": { - "description": "The status of the block synchronizer.", - "type": "object", - "properties": { - "historical": { - "description": "The status of syncing a historical block, if any.", - "anyOf": [ - { - "$ref": 
"#/components/schemas/BlockSyncStatus" - }, - { - "type": "null" - } - ] - }, - "forward": { - "description": "The status of syncing a forward block, if any.", - "anyOf": [ - { - "$ref": "#/components/schemas/BlockSyncStatus" - }, - { - "type": "null" - } - ] + "accumulated_seed": { + "description": "Accumulated seed.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" } - }, - "additionalProperties": false + ] }, - "BlockSyncStatus": { - "description": "The status of syncing an individual block.", - "type": "object", - "required": [ - "acquisition_state", - "block_hash" - ], - "properties": { - "block_hash": { - "description": "The block hash.", - "allOf": [ - { - "$ref": "#/components/schemas/BlockHash" - } - ] - }, - "block_height": { - "description": "The height of the block, if known.", - "type": [ - "integer", - "null" - ], - "format": "uint64", - "minimum": 0.0 + "era_end": { + "description": "The era end.", + "anyOf": [ + { + "$ref": "#/components/schemas/JsonEraEnd" }, - "acquisition_state": { - "description": "The state of acquisition of the data associated with the block.", - "type": "string" + { + "type": "null" } - }, - "additionalProperties": false + ] }, - "JsonValidatorChanges": { - "description": "The changes in a validator's status.", - "type": "object", - "required": [ - "public_key", - "status_changes" - ], - "properties": { - "public_key": { - "description": "The public key of the validator.", - "allOf": [ - { - "$ref": "#/components/schemas/PublicKey" - } - ] - }, - "status_changes": { - "description": "The set of changes to the validator's status.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonValidatorStatusChange" - } + "timestamp": { + "description": "The block timestamp.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" } - }, - "additionalProperties": false + ] }, - "JsonValidatorStatusChange": { - "description": "A single change to a validator's status in the given era.", - "type": "object", - "required": [ - "era_id", - "validator_change" - ], - "properties": { - "era_id": { - "description": "The era in which the change occurred.", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - "validator_change": { - "description": "The change in validator status.", - "allOf": [ - { - "$ref": "#/components/schemas/ValidatorChange" - } - ] + "era_id": { + "description": "The block era id.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" } - }, - "additionalProperties": false - }, - "ValidatorChange": { - "description": "A change to a validator's status between two eras.", - "type": "string", - "enum": [ - "Added", - "Removed", - "Banned", - "CannotPropose", - "SeenAsFaulty" ] }, - "ChainspecRawBytes": { - "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", - "type": "object", - "required": [ - "chainspec_bytes", - "maybe_genesis_accounts_bytes", - "maybe_global_state_bytes" - ], - "properties": { - "chainspec_bytes": { - "description": "Hex-encoded raw bytes of the current chainspec.toml file.", - "type": "string" - }, - "maybe_genesis_accounts_bytes": { - "description": "Hex-encoded raw bytes of the current genesis accounts.toml file.", - "type": "string" - }, - "maybe_global_state_bytes": { - "description": "Hex-encoded raw bytes of the current global_state.toml file.", - "type": "string" + "height": { + "description": "The block height.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": 
"The protocol version.", + "allOf": [ + { + "$ref": "#/components/schemas/ProtocolVersion" } + ] + } + }, + "additionalProperties": false + }, + "JsonEraEnd": { + "type": "object", + "required": [ + "era_report", + "next_era_validator_weights" + ], + "properties": { + "era_report": { + "$ref": "#/components/schemas/JsonEraReport" + }, + "next_era_validator_weights": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ValidatorWeight" + } + } + }, + "additionalProperties": false + }, + "JsonEraReport": { + "description": "Equivocation and reward information to be included in the terminal block.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "rewards" + ], + "properties": { + "equivocators": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" } }, - "JsonBlock": { - "description": "A JSON-friendly representation of `Block`.", - "type": "object", - "required": [ - "body", - "hash", - "header", - "proofs" - ], - "properties": { - "hash": { - "description": "`BlockHash`", - "allOf": [ - { - "$ref": "#/components/schemas/BlockHash" - } - ] - }, - "header": { - "description": "JSON-friendly block header.", - "allOf": [ - { - "$ref": "#/components/schemas/JsonBlockHeader" - } - ] - }, - "body": { - "description": "JSON-friendly block body.", - "allOf": [ - { - "$ref": "#/components/schemas/JsonBlockBody" - } - ] - }, - "proofs": { - "description": "JSON-friendly list of proofs for this block.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonProof" - } - } - }, - "additionalProperties": false + "rewards": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Reward" + } }, - "JsonBlockBody": { - "description": "A JSON-friendly representation of `Body`", - "type": "object", - "required": [ - "deploy_hashes", - "proposer", - "transfer_hashes" - ], - "properties": { - "proposer": { - "$ref": "#/components/schemas/PublicKey" - }, - "deploy_hashes": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DeployHash" - } - }, - "transfer_hashes": { - "type": "array", - "items": { - "$ref": "#/components/schemas/DeployHash" - } - } - }, - "additionalProperties": false + "inactive_validators": { + "type": "array", + "items": { + "$ref": "#/components/schemas/PublicKey" + } + } + }, + "additionalProperties": false + }, + "Reward": { + "type": "object", + "required": [ + "amount", + "validator" + ], + "properties": { + "validator": { + "$ref": "#/components/schemas/PublicKey" }, - "JsonProof": { - "description": "A JSON-friendly representation of a proof, i.e. 
a block's finality signature.", - "type": "object", - "required": [ - "public_key", - "signature" - ], - "properties": { - "public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "signature": { - "$ref": "#/components/schemas/Signature" - } - }, - "additionalProperties": false + "amount": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "ValidatorWeight": { + "type": "object", + "required": [ + "validator", + "weight" + ], + "properties": { + "validator": { + "$ref": "#/components/schemas/PublicKey" }, - "EraSummary": { - "description": "The summary of an era", + "weight": { + "$ref": "#/components/schemas/U512" + } + }, + "additionalProperties": false + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "PurseIdentifier": { + "description": "Identifier of a purse.", + "anyOf": [ + { + "description": "The main purse of the account identified by this public key.", "type": "object", "required": [ - "block_hash", - "era_id", - "merkle_proof", - "state_root_hash", - "stored_value" + "main_purse_under_public_key" ], "properties": { - "block_hash": { - "description": "The block hash", - "allOf": [ - { - "$ref": "#/components/schemas/BlockHash" - } - ] - }, - "era_id": { - "description": "The era id", - "allOf": [ - { - "$ref": "#/components/schemas/EraId" - } - ] - }, - "stored_value": { - "description": "The StoredValue containing era information", - "allOf": [ - { - "$ref": "#/components/schemas/StoredValue" - } - ] - }, - "state_root_hash": { - "description": "Hex-encoded hash of the state root", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] - }, - "merkle_proof": { - "description": "The Merkle proof", - "type": "string" + "main_purse_under_public_key": { + "$ref": "#/components/schemas/PublicKey" } }, "additionalProperties": false }, - "AuctionState": { - "description": "Data structure summarizing auction contract data.", + { + "description": "The main purse of the account identified by this account hash.", "type": "object", "required": [ - "bids", - "block_height", - "era_validators", - "state_root_hash" + "main_purse_under_account_hash" ], "properties": { - "state_root_hash": { - "description": "Global state hash.", - "allOf": [ - { - "$ref": "#/components/schemas/Digest" - } - ] - }, - "block_height": { - "description": "Block height.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "era_validators": { - "description": "Era validators.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonEraValidators" - } - }, - "bids": { - "description": "All bids contained within a vector.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonBids" - } + "main_purse_under_account_hash": { + "$ref": "#/components/schemas/AccountHash" } }, "additionalProperties": false }, - "JsonEraValidators": { - "description": "The validators for the given era.", + { + "description": "The purse identified by this URef.", "type": "object", "required": [ - "era_id", - "validator_weights" + "purse_uref" ], "properties": { - "era_id": { - "$ref": "#/components/schemas/EraId" - }, - "validator_weights": { - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonValidatorWeights" - } + "purse_uref": { + "$ref": "#/components/schemas/URef" } }, "additionalProperties": false + } + ] + }, + "PeersMap": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": 
"#/components/schemas/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + "properties": { + "node_id": { + "description": "Node id.", + "type": "string" }, - "JsonValidatorWeights": { - "description": "A validator's weight.", - "type": "object", - "required": [ - "public_key", - "weight" - ], - "properties": { - "public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "weight": { - "$ref": "#/components/schemas/U512" + "address": { + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "MinimalBlockInfo": { + "description": "Minimal info of a `Block`.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/components/schemas/BlockHash" + }, + "timestamp": { + "$ref": "#/components/schemas/Timestamp" + }, + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/components/schemas/Digest" + }, + "creator": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": "object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/components/schemas/ActivationPoint" + }, + "protocol_version": { + "type": "string" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" } - }, - "additionalProperties": false + ] }, - "JsonBids": { - "description": "A Json representation of a single bid.", - "type": "object", - "required": [ - "bid", - "public_key" - ], - "properties": { - "public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "bid": { - "$ref": "#/components/schemas/JsonBid" + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/components/schemas/Timestamp" } - }, - "additionalProperties": false + ] + } + ] + }, + "ReactorState": { + "description": "The state of the reactor.", + "type": "string", + "enum": [ + "Initialize", + "CatchUp", + "Upgrading", + "KeepUp", + "Validate", + "ShutdownForUpgrade" + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 }, - "JsonBid": { - "description": "An entry in a founding validator map representing a bid.", - "type": "object", - "required": [ - "bonding_purse", - "delegation_rate", - "delegators", - "inactive", - "staked_amount" - ], - "properties": { - "bonding_purse": { - "description": "The purse that was used for bonding.", - "allOf": [ - { - "$ref": "#/components/schemas/URef" - } - ] - }, - "staked_amount": { - "description": "The amount of tokens staked by a validator (not including delegators).", - "allOf": [ - { - "$ref": "#/components/schemas/U512" - } - ] - }, - "delegation_rate": { - "description": "The delegation rate.", - "type": "integer", - "format": "uint8", - "minimum": 0.0 + "high": { + "description": "The inclusive upper bound of the range.", + 
"type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" }, - "delegators": { - "description": "The delegators.", - "type": "array", - "items": { - "$ref": "#/components/schemas/JsonDelegator" - } + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSyncStatus" }, - "inactive": { - "description": "Is this an inactive validator.", - "type": "boolean" + { + "type": "null" } - }, - "additionalProperties": false + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] }, - "JsonDelegator": { - "description": "A delegator associated with the given validator.", - "type": "object", - "required": [ - "bonding_purse", - "delegatee", - "public_key", - "staked_amount" + "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" ], - "properties": { - "public_key": { - "$ref": "#/components/schemas/PublicKey" - }, - "staked_amount": { - "$ref": "#/components/schemas/U512" - }, - "bonding_purse": { - "$ref": "#/components/schemas/URef" - }, - "delegatee": { + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + }, + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { "$ref": "#/components/schemas/PublicKey" } - }, - "additionalProperties": false + ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/components/schemas/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "type": "string", + "enum": [ + "Added", + "Removed", + "Banned", + "CannotPropose", + "SeenAsFaulty" + ] + }, + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes", + "maybe_genesis_accounts_bytes", + "maybe_global_state_bytes" 
+ ], + "properties": { + "chainspec_bytes": { + "description": "Hex-encoded raw bytes of the current chainspec.toml file.", + "type": "string" + }, + "maybe_genesis_accounts_bytes": { + "description": "Hex-encoded raw bytes of the current genesis accounts.toml file.", + "type": "string" + }, + "maybe_global_state_bytes": { + "description": "Hex-encoded raw bytes of the current global_state.toml file.", + "type": "string" } } - } - } - ], - "type": "object", - "properties": { - "openrpc": { - "type": "string" - }, - "info": { - "type": "object", - "properties": { - "version": { - "type": "string" + }, + "JsonBlock": { + "description": "A JSON-friendly representation of `Block`.", + "type": "object", + "required": [ + "body", + "hash", + "header", + "proofs" + ], + "properties": { + "hash": { + "description": "`BlockHash`", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "header": { + "description": "JSON-friendly block header.", + "allOf": [ + { + "$ref": "#/components/schemas/JsonBlockHeader" + } + ] + }, + "body": { + "description": "JSON-friendly block body.", + "allOf": [ + { + "$ref": "#/components/schemas/JsonBlockBody" + } + ] + }, + "proofs": { + "description": "JSON-friendly list of proofs for this block.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonProof" + } + } }, - "title": { - "type": "string" + "additionalProperties": false + }, + "JsonBlockBody": { + "description": "A JSON-friendly representation of `Body`", + "type": "object", + "required": [ + "deploy_hashes", + "proposer", + "transfer_hashes" + ], + "properties": { + "proposer": { + "$ref": "#/components/schemas/PublicKey" + }, + "deploy_hashes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + }, + "transfer_hashes": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeployHash" + } + } }, - "description": { - "type": "string" + "additionalProperties": false + }, + "JsonProof": { + "description": "A JSON-friendly representation of a proof, i.e. 
a block's finality signature.", + "type": "object", + "required": [ + "public_key", + "signature" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "signature": { + "$ref": "#/components/schemas/Signature" + } }, - "contact": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" + "additionalProperties": false + }, + "EraSummary": { + "description": "The summary of an era", + "type": "object", + "required": [ + "block_hash", + "era_id", + "merkle_proof", + "state_root_hash", + "stored_value" + ], + "properties": { + "block_hash": { + "description": "The block hash", + "allOf": [ + { + "$ref": "#/components/schemas/BlockHash" + } + ] + }, + "era_id": { + "description": "The era id", + "allOf": [ + { + "$ref": "#/components/schemas/EraId" + } + ] + }, + "stored_value": { + "description": "The StoredValue containing era information", + "allOf": [ + { + "$ref": "#/components/schemas/StoredValue" + } + ] + }, + "state_root_hash": { + "description": "Hex-encoded hash of the state root", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "merkle_proof": { + "description": "The Merkle proof", + "type": "string" + } + }, + "additionalProperties": false + }, + "AuctionState": { + "description": "Data structure summarizing auction contract data.", + "type": "object", + "required": [ + "bids", + "block_height", + "era_validators", + "state_root_hash" + ], + "properties": { + "state_root_hash": { + "description": "Global state hash.", + "allOf": [ + { + "$ref": "#/components/schemas/Digest" + } + ] + }, + "block_height": { + "description": "Block height.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "era_validators": { + "description": "Era validators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonEraValidators" + } + }, + "bids": { + "description": "All bids contained within a vector.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonBids" } } }, - "license": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" + "additionalProperties": false + }, + "JsonEraValidators": { + "description": "The validators for the given era.", + "type": "object", + "required": [ + "era_id", + "validator_weights" + ], + "properties": { + "era_id": { + "$ref": "#/components/schemas/EraId" + }, + "validator_weights": { + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonValidatorWeights" } } - } - } - }, - "servers": { - "type": "array", - "items": { + }, + "additionalProperties": false + }, + "JsonValidatorWeights": { + "description": "A validator's weight.", "type": "object", + "required": [ + "public_key", + "weight" + ], "properties": { - "name": { - "type": "string" + "public_key": { + "$ref": "#/components/schemas/PublicKey" }, - "url": { - "type": "string" + "weight": { + "$ref": "#/components/schemas/U512" } - } - } - }, - "methods": { - "type": "array", - "items": true - }, - "components": { - "type": "object", - "properties": { - "schemas": { - "type": "object", - "additionalProperties": true - } + }, + "additionalProperties": false + }, + "JsonBids": { + "description": "A Json representation of a single bid.", + "type": "object", + "required": [ + "bid", + "public_key" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "bid": { + "$ref": "#/components/schemas/JsonBid" + } + }, + "additionalProperties": false + }, + "JsonBid": { 
+ "description": "An entry in a founding validator map representing a bid.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount" + ], + "properties": { + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/components/schemas/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/components/schemas/U512" + } + ] + }, + "delegation_rate": { + "description": "The delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "delegators": { + "description": "The delegators.", + "type": "array", + "items": { + "$ref": "#/components/schemas/JsonDelegator" + } + }, + "inactive": { + "description": "Is this an inactive validator.", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "JsonDelegator": { + "description": "A delegator associated with the given validator.", + "type": "object", + "required": [ + "bonding_purse", + "delegatee", + "public_key", + "staked_amount" + ], + "properties": { + "public_key": { + "$ref": "#/components/schemas/PublicKey" + }, + "staked_amount": { + "$ref": "#/components/schemas/U512" + }, + "bonding_purse": { + "$ref": "#/components/schemas/URef" + }, + "delegatee": { + "$ref": "#/components/schemas/PublicKey" + } + }, + "additionalProperties": false } } } diff --git a/resources/test/sse_data_schema.json b/resources/test/sse_data_schema.json index 8c77ad830e..7a7b305793 100644 --- a/resources/test/sse_data_schema.json +++ b/resources/test/sse_data_schema.json @@ -417,6 +417,23 @@ }, "PublicKey": { "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], "type": "string" }, "Reward": { @@ -2032,4 +2049,4 @@ } } } -} +} \ No newline at end of file diff --git a/types/src/crypto/asymmetric_key.rs b/types/src/crypto/asymmetric_key.rs index edf91cd5ae..082bca83e8 100644 --- a/types/src/crypto/asymmetric_key.rs +++ b/types/src/crypto/asymmetric_key.rs @@ -40,6 +40,8 @@ use rand::{Rng, RngCore}; #[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_json::json; #[cfg(any(feature = "std", test))] use untrusted::Input; @@ -868,6 +870,27 @@ impl JsonSchema for PublicKey { schema_object.metadata().description = Some( "Hex-encoded cryptographic public key, including the algorithm tag prefix.".to_string(), ); + schema_object.metadata().examples = vec![ + json!({ + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an \ + immediate switch block after a network upgrade rather than a specific validator. \ + Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }), + json!({ + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is \ + followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }), + json!({ + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is \ + followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + }), + ]; schema_object.into() } } From d6c79e5c1e9f894be98ba64159a4f16a5b03c43f Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Mon, 4 Sep 2023 12:59:59 +0100 Subject: [PATCH 06/41] update patch URL --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14317b9fd0..7a51409346 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3163,7 +3163,7 @@ dependencies = [ [[package]] name = "parity-wasm" version = "0.45.0" -source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" +source = "git+ssh://git@github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" [[package]] name = "parking_lot" diff --git a/Cargo.toml b/Cargo.toml index ec6b18c2dd..5c77d04195 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,4 +43,4 @@ codegen-units = 1 lto = true [patch.crates-io] -parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } +parity-wasm = { git = "ssh://git@github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } From f916c4cbb9060d3def3e53fd7dbb8a975db32a2d Mon Sep 17 00:00:00 2001 From: "casperlabs-bors-ng[bot]" <82463608+casperlabs-bors-ng[bot]@users.noreply.github.com> Date: Fri, 16 Jun 2023 20:36:15 +0000 Subject: [PATCH 07/41] Merge #4042 4042: finer grained block_synchronizer latching r=EdHastingsCasperLabs a=EdHastingsCasperLabs As discussed with `@alsrdn` , a finer grained latching mechanism for block_synchronization. 
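
In broad strokes: the previous design used a single boolean-style in-flight latch per block builder (a lone timestamp that suppressed all `need_next` asks until the latch-reset interval expired). This change replaces it with a counting latch that is incremented once per outstanding request and decremented as each response lands, keeping the reset interval only as a backstop. The sketch below is a condensed illustration of the mechanism, based on the `Latch` type this patch adds in `node/src/components/block_synchronizer/block_builder/latch.rs`; it is simplified for exposition (the real type also derives `DataSize` and warns on counter overflow rather than saturating):

    use casper_types::{TimeDiff, Timestamp};

    /// Counting latch: one count per in-flight request.
    #[derive(Debug, Default)]
    struct Latch {
        count: u8,
        timestamp: Option<Timestamp>,
    }

    impl Latch {
        /// Called when requests go out, e.g. once per peer asked.
        fn increment(&mut self, by: u8) {
            self.count = self.count.saturating_add(by);
            self.timestamp = Some(Timestamp::now());
        }

        /// Called as each response arrives; hitting zero frees the
        /// builder to compute its next need-next immediately.
        fn decrement(&mut self, by: u8) {
            self.count = self.count.saturating_sub(by);
            self.timestamp = Some(Timestamp::now());
        }

        /// Still latched? The reset interval remains as a backstop so
        /// a lost response cannot wedge the builder forever.
        fn check_latch(&mut self, interval: TimeDiff, now: Timestamp) -> bool {
            match self.timestamp {
                None => false,
                Some(ts) => {
                    if now > ts + interval {
                        self.count = 0;
                        self.timestamp = None;
                    }
                    self.count > 0
                }
            }
        }
    }

With this in place a builder can latch by the number of peers it fanned out to (`latch_by(peers.len())`) for fetches sent to several peers at once, latch by one for singleton requests such as global-state sync, and unlatch as soon as the final outstanding response is handled, instead of idling out the full latch-reset interval after every unsuccessful round.
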
Co-authored-by: Ed Hastings Co-authored-by: Alexandru Sardan Co-authored-by: Alex Sardan <86006646+alsrdn@users.noreply.github.com> --- node/src/components/block_synchronizer.rs | 375 +++++++++-------- .../block_synchronizer/block_acquisition.rs | 6 +- .../block_acquisition_action.rs | 4 +- .../block_synchronizer/block_builder.rs | 53 +-- .../block_synchronizer/block_builder/latch.rs | 63 +++ .../block_synchronizer/block_builder/tests.rs | 6 - .../block_synchronizer_progress.rs | 10 +- .../components/block_synchronizer/config.rs | 5 - .../block_synchronizer/need_next.rs | 2 +- .../block_synchronizer/peer_list.rs | 4 +- .../components/block_synchronizer/tests.rs | 384 +++++++++++++----- node/src/reactor/main_reactor.rs | 2 +- node/src/reactor/main_reactor/catch_up.rs | 16 +- node/src/reactor/main_reactor/keep_up.rs | 18 - node/src/reactor/main_reactor/validate.rs | 1 + node/src/types/chainspec/core_config.rs | 4 +- node/src/types/validator_matrix.rs | 11 + 17 files changed, 580 insertions(+), 384 deletions(-) create mode 100644 node/src/components/block_synchronizer/block_builder/latch.rs diff --git a/node/src/components/block_synchronizer.rs b/node/src/components/block_synchronizer.rs index c6ed5a1c34..659a2d9af5 100644 --- a/node/src/components/block_synchronizer.rs +++ b/node/src/components/block_synchronizer.rs @@ -202,7 +202,7 @@ pub(crate) struct BlockSynchronizer { state: ComponentState, config: Config, chainspec: Arc, - max_simultaneous_peers: u32, + max_simultaneous_peers: u8, validator_matrix: ValidatorMatrix, // execute forward block (do not get global state or execution effects) @@ -219,7 +219,7 @@ impl BlockSynchronizer { pub(crate) fn new( config: Config, chainspec: Arc, - max_simultaneous_peers: u32, + max_simultaneous_peers: u8, validator_matrix: ValidatorMatrix, registry: &Registry, ) -> Result { @@ -296,7 +296,6 @@ impl BlockSynchronizer { should_fetch_execution_state, self.max_simultaneous_peers, self.config.peer_refresh_interval, - self.config.latch_reset_interval, self.chainspec.core_config.legacy_required_finality, self.chainspec .core_config @@ -330,57 +329,45 @@ impl BlockSynchronizer { } let (block_header, maybe_sigs) = sync_leap.highest_block_header_and_signatures(); - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) - if builder.block_hash() == block_header.block_hash() => - { - debug!(%builder, "BlockSynchronizer: register_sync_leap update builder"); - apply_sigs(builder, maybe_sigs); - builder.register_peers(peers); - } - _ => { - debug!("BlockSynchronizer: register_sync_leap update validator_matrix"); - let era_id = block_header.era_id(); - if let Some(validator_weights) = self.validator_matrix.validator_weights(era_id) { - let mut builder = BlockBuilder::new_from_sync_leap( - block_header, - maybe_sigs, - validator_weights, - peers, - should_fetch_execution_state, - self.max_simultaneous_peers, - self.config.peer_refresh_interval, - self.config.latch_reset_interval, - self.chainspec.core_config.legacy_required_finality, - self.chainspec - .core_config - .start_protocol_version_with_strict_finality_signatures_required, - ); - apply_sigs(&mut builder, maybe_sigs); - if should_fetch_execution_state { - self.historical = Some(builder); - } else { - self.forward = Some(builder); - } + if let Some(builder) = self.get_builder(block_header.block_hash(), true) { + debug!(%builder, "BlockSynchronizer: register_sync_leap update builder"); + apply_sigs(builder, maybe_sigs); + builder.register_peers(peers); + } else { + let era_id 
= block_header.era_id(); + if let Some(validator_weights) = self.validator_matrix.validator_weights(era_id) { + let mut builder = BlockBuilder::new_from_sync_leap( + block_header, + maybe_sigs, + validator_weights, + peers, + should_fetch_execution_state, + self.max_simultaneous_peers, + self.config.peer_refresh_interval, + self.chainspec.core_config.legacy_required_finality, + self.chainspec + .core_config + .start_protocol_version_with_strict_finality_signatures_required, + ); + apply_sigs(&mut builder, maybe_sigs); + if should_fetch_execution_state { + self.historical = Some(builder); } else { - warn!( - block_hash = %block_header.block_hash(), - "BlockSynchronizer: register_sync_leap unable to create block builder", - ); + self.forward = Some(builder); } + } else { + warn!( + block_hash = %block_header.block_hash(), + "BlockSynchronizer: register_sync_leap unable to create block builder", + ); } } } /// Registers peers to a block builder by `BlockHash`. pub(crate) fn register_peers(&mut self, block_hash: BlockHash, peers: Vec) { - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - builder.register_peers(peers); - } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); - } + if let Some(builder) = self.get_builder(block_hash, false) { + builder.register_peers(peers); } } @@ -547,11 +534,15 @@ impl BlockSynchronizer { where REv: ReactorEvent + From> + From, { + let latch_reset_interval = self.config.latch_reset_interval; let need_next_interval = self.config.need_next_interval.into(); let mut results = Effects::new(); - let max_simultaneous_peers = self.max_simultaneous_peers as usize; + let max_simultaneous_peers = self.max_simultaneous_peers; let mut builder_needs_next = |builder: &mut BlockBuilder, chainspec: Arc| { - if builder.in_flight_latch().is_some() || builder.is_finished() || builder.is_failed() { + if builder.check_latch(latch_reset_interval) + || builder.is_finished() + || builder.is_failed() + { return; } let action = builder.block_acquisition_action(rng, max_simultaneous_peers); @@ -572,7 +563,7 @@ impl BlockSynchronizer { ); } NeedNext::BlockHeader(block_hash) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::( @@ -584,7 +575,7 @@ impl BlockSynchronizer { })) } NeedNext::BlockBody(block_hash) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::(block_hash, node_id, Box::new(EmptyValidationMetadata)) @@ -592,10 +583,13 @@ impl BlockSynchronizer { })) } NeedNext::FinalitySignatures(block_hash, era_id, validators) => { - builder.set_in_flight_latch(); + builder.latch_by(std::cmp::min( + validators.len(), + max_simultaneous_peers as usize, + )); for (validator, peer) in validators .into_iter() - .take(max_simultaneous_peers) + .take(max_simultaneous_peers as usize) .zip(peers.into_iter().cycle()) { debug!(%validator, %peer, "attempting to fetch FinalitySignature"); @@ -617,7 +611,7 @@ impl BlockSynchronizer { } } NeedNext::GlobalState(block_hash, global_state_root_hash) => { - builder.set_in_flight_latch(); + builder.latch(); results.extend( effect_builder .sync_global_state(block_hash, global_state_root_hash) @@ -625,7 +619,7 @@ impl BlockSynchronizer { ); } NeedNext::ExecutionResultsChecksum(block_hash, global_state_root_hash) => { - builder.set_in_flight_latch(); + 
builder.latch(); results.extend( effect_builder .get_execution_results_checksum(global_state_root_hash) @@ -636,7 +630,7 @@ impl BlockSynchronizer { ); } NeedNext::ExecutionResults(block_hash, id, checksum) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { debug!("attempting to fetch BlockExecutionResultsOrChunk"); effect_builder @@ -648,7 +642,7 @@ impl BlockSynchronizer { })) } NeedNext::ApprovalsHashes(block_hash, block) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::(block_hash, node_id, block.clone()) @@ -656,7 +650,7 @@ impl BlockSynchronizer { })) } NeedNext::DeployByHash(block_hash, deploy_hash) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::( @@ -671,7 +665,7 @@ impl BlockSynchronizer { })) } NeedNext::DeployById(block_hash, deploy_id) => { - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::(deploy_id, node_id, Box::new(EmptyValidationMetadata)) @@ -682,17 +676,22 @@ impl BlockSynchronizer { })) } NeedNext::MakeExecutableBlock(block_hash, _) => { - if false == builder.should_fetch_execution_state() { - builder.set_in_flight_latch(); - if builder.execution_unattempted() { - results.extend(effect_builder.make_block_executable(block_hash).event( - move |result| Event::MadeFinalizedBlock { block_hash, result }, - )) - } + let need_to_execute = false == builder.should_fetch_execution_state() + && builder.execution_unattempted(); + if need_to_execute { + builder.latch(); + results.extend( + effect_builder + .make_block_executable(block_hash) + .event(move |result| Event::MadeFinalizedBlock { + block_hash, + result, + }), + ) } } NeedNext::EnqueueForExecution(block_hash, _, finalized_block, deploys) => { - builder.set_in_flight_latch(); + builder.latch(); results.extend( effect_builder .enqueue_block_for_execution( @@ -708,7 +707,7 @@ impl BlockSynchronizer { // because we have global state and execution effects (if // any). 
if builder.should_fetch_execution_state() { - builder.set_in_flight_latch(); + builder.latch(); results.extend( effect_builder.mark_block_completed(block_height).event( move |is_new| Event::MarkBlockCompleted { block_hash, is_new }, @@ -717,16 +716,17 @@ impl BlockSynchronizer { } } NeedNext::Peers(block_hash) => { - builder.set_in_flight_latch(); if builder.should_fetch_execution_state() { + builder.latch(); // the accumulator may or may not have peers for an older block, // so we're going to also get a random sampling from networking results.extend( effect_builder - .get_fully_connected_peers(max_simultaneous_peers) + .get_fully_connected_peers(max_simultaneous_peers as usize) .event(move |peers| Event::NetworkPeers(block_hash, peers)), ) } + builder.latch(); results.extend( effect_builder .get_block_accumulated_peers(block_hash) @@ -740,7 +740,7 @@ impl BlockSynchronizer { "BlockSynchronizer: does not have era_validators for era_id: {}", era_id ); - builder.set_in_flight_latch(); + builder.latch_by(peers.len()); results.extend(peers.into_iter().flat_map(|node_id| { effect_builder .fetch::( @@ -779,12 +779,9 @@ impl BlockSynchronizer { results } - fn register_disconnected_peer(&mut self, node_id: NodeId) { - if let Some(builder) = &mut self.forward { - builder.disqualify_peer(node_id); - } - if let Some(builder) = &mut self.historical { - builder.disqualify_peer(node_id); + fn peers_accumulated(&mut self, block_hash: BlockHash, peers: Vec) { + if let Some(builder) = self.get_builder(block_hash, true) { + builder.register_peers(peers); } } @@ -809,28 +806,23 @@ impl BlockSynchronizer { } }; - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - match maybe_block_header { - None => { - if let Some(peer_id) = maybe_peer_id { - builder.demote_peer(peer_id); - } + let validator_matrix = &self.validator_matrix.clone(); + if let Some(builder) = self.get_builder(block_hash, true) { + match maybe_block_header { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); } - Some(block_header) => { - if let Err(error) = - builder.register_block_header(*block_header, maybe_peer_id) - { - error!(%error, "BlockSynchronizer: failed to apply block header"); - } else { - builder.register_era_validator_weights(&self.validator_matrix); - } + } + Some(block_header) => { + if let Err(error) = builder.register_block_header(*block_header, maybe_peer_id) + { + error!(%error, "BlockSynchronizer: failed to apply block header"); + } else { + builder.register_era_validator_weights(validator_matrix); } } } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); - } } } @@ -859,24 +851,19 @@ impl BlockSynchronizer { } }; - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - match maybe_block { - None => { - if let Some(peer_id) = maybe_peer_id { - builder.demote_peer(peer_id); - } + if let Some(builder) = self.get_builder(block_hash, true) { + match maybe_block { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); } - Some(block) => { - if let Err(error) = builder.register_block(&block, maybe_peer_id) { - error!(%error, "BlockSynchronizer: failed to apply block"); - } + } + Some(block) => { + if let Err(error) = builder.register_block(&block, maybe_peer_id) { + error!(%error, "BlockSynchronizer: failed to apply block"); } } } - _ => { - trace!(%block_hash, 
"BlockSynchronizer: not currently synchronizing block"); - } } } @@ -908,26 +895,21 @@ impl BlockSynchronizer { } }; - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - match maybe_approvals_hashes { - None => { - if let Some(peer_id) = maybe_peer_id { - builder.demote_peer(peer_id); - } + if let Some(builder) = self.get_builder(block_hash, true) { + match maybe_approvals_hashes { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); } - Some(approvals_hashes) => { - if let Err(error) = - builder.register_approvals_hashes(&approvals_hashes, maybe_peer_id) - { - error!(%error, "BlockSynchronizer: failed to apply approvals hashes"); - } + } + Some(approvals_hashes) => { + if let Err(error) = + builder.register_approvals_hashes(&approvals_hashes, maybe_peer_id) + { + error!(%error, "BlockSynchronizer: failed to apply approvals hashes"); } } } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); - } } } @@ -954,28 +936,21 @@ impl BlockSynchronizer { } }; - let block_hash = id.block_hash; - - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - match maybe_finality_signature { - None => { - if let Some(peer_id) = maybe_peer_id { - builder.demote_peer(peer_id); - } + if let Some(builder) = self.get_builder(id.block_hash, true) { + match maybe_finality_signature { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); } - Some(finality_signature) => { - if let Err(error) = - builder.register_finality_signature(*finality_signature, maybe_peer_id) - { - warn!(%error, "BlockSynchronizer: failed to apply finality signature"); - } + } + Some(finality_signature) => { + if let Err(error) = + builder.register_finality_signature(*finality_signature, maybe_peer_id) + { + warn!(%error, "BlockSynchronizer: failed to apply finality signature"); } } } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); - } } } @@ -1017,21 +992,17 @@ impl BlockSynchronizer { self.validator_matrix.register_era_validator_weights(evw); } } - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - if demote_peer { - if let Some(peer_id) = maybe_peer_id { - builder.demote_peer(peer_id); - } - } else { - if let Some(peer_id) = maybe_peer_id { - builder.promote_peer(peer_id); - } - builder.register_era_validator_weights(&self.validator_matrix); + let validator_matrix = &self.validator_matrix.clone(); + if let Some(builder) = self.get_builder(block_hash, true) { + if demote_peer { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); } - } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + } else { + if let Some(peer_id) = maybe_peer_id { + builder.promote_peer(peer_id); + } + builder.register_era_validator_weights(validator_matrix); } } } @@ -1075,6 +1046,7 @@ impl BlockSynchronizer { if builder.block_hash() != block_hash { debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); } else { + builder.latch_decrement(); if let Some(root_hash) = maybe_root_hash { if let Err(error) = builder.register_global_state(root_hash.into_inner()) { error!(%block_hash, %error, "BlockSynchronizer: failed to apply global state"); @@ -1118,10 +1090,13 @@ impl BlockSynchronizer { if let Some(builder) 
= &mut self.historical { if builder.block_hash() != block_hash { debug!(%block_hash, "BlockSynchronizer: not currently synchronising block"); - } else if let Err(error) = - builder.register_execution_results_checksum(execution_results_checksum) - { - error!(%block_hash, %error, "BlockSynchronizer: failed to apply execution results checksum"); + } else { + builder.latch_decrement(); + if let Err(error) = + builder.register_execution_results_checksum(execution_results_checksum) + { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply execution results checksum"); + } } } } @@ -1166,7 +1141,7 @@ impl BlockSynchronizer { debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); return Effects::new(); } - + builder.latch_decrement(); match maybe_value_or_chunk { None => { debug!(%block_hash, "execution_results_fetched: No maybe_value_or_chunk"); @@ -1204,12 +1179,15 @@ impl BlockSynchronizer { Effects::new() } - fn register_execution_results_stored(&mut self, block_hash: BlockHash) { + fn execution_results_stored(&mut self, block_hash: BlockHash) { if let Some(builder) = &mut self.historical { if builder.block_hash() != block_hash { debug!(%block_hash, "BlockSynchronizer: register_execution_results_stored: not currently synchronizing block"); - } else if let Err(error) = builder.register_execution_results_stored_notification() { - error!(%block_hash, %error, "BlockSynchronizer: register_execution_results_stored: failed to apply stored execution results"); + } else { + builder.latch_decrement(); + if let Err(error) = builder.register_execution_results_stored_notification() { + error!(%block_hash, %error, "BlockSynchronizer: register_execution_results_stored: failed to apply stored execution results"); + } } } } @@ -1220,18 +1198,22 @@ impl BlockSynchronizer { FetchedData::FromStorage { item } => (item, None), }; - match (&mut self.forward, &mut self.historical) { - (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { - if let Err(error) = builder.register_deploy(deploy.fetch_id(), maybe_peer) { - error!(%block_hash, %error, "BlockSynchronizer: failed to apply deploy"); - } - } - _ => { - trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + if let Some(builder) = self.get_builder(block_hash, true) { + if let Err(error) = builder.register_deploy(deploy.fetch_id(), maybe_peer) { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply deploy"); } } } + fn disqualify_peer(&mut self, node_id: NodeId) { + if let Some(builder) = &mut self.forward { + builder.disqualify_peer(node_id); + } + if let Some(builder) = &mut self.historical { + builder.disqualify_peer(node_id); + } + } + fn progress(&self, builder: &BlockBuilder) -> BlockSynchronizerProgress { if builder.is_finished() { match builder.block_height_and_era() { @@ -1274,19 +1256,11 @@ impl BlockSynchronizer { .unwrap_or_else(Timestamp::zero), ); - if last_progress_time.elapsed() > self.config.stall_limit { - BlockSynchronizerProgress::Stalled( - builder.block_hash(), - builder.block_height(), - last_progress_time, - ) - } else { - BlockSynchronizerProgress::Syncing( - builder.block_hash(), - builder.block_height(), - last_progress_time, - ) - } + BlockSynchronizerProgress::Syncing( + builder.block_hash(), + builder.block_height(), + last_progress_time, + ) } fn status(&self) -> BlockSynchronizerStatus { @@ -1303,6 +1277,25 @@ impl BlockSynchronizer { }), ) } + + fn get_builder( + &mut self, + block_hash: BlockHash, + decrement_latch: bool, + ) -> 
Option<&mut BlockBuilder> { + match (&mut self.forward, &mut self.historical) { + (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { + if decrement_latch { + builder.latch_decrement(); + } + Some(builder) + } + _ => { + trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + None + } + } + } } impl InitializedComponent for BlockSynchronizer @@ -1488,7 +1481,7 @@ impl Component for BlockSynchronizer { } // when a peer is disconnected from for any reason, disqualify peer Event::DisconnectFromPeer(node_id) => { - self.register_disconnected_peer(node_id); + self.disqualify_peer(node_id); Effects::new() } Event::MarkBlockExecutionEnqueued(block_hash) => { @@ -1558,7 +1551,7 @@ impl Component for BlockSynchronizer { } // historical sync needs to know that execution effects have been stored Event::ExecutionResultsStored(block_hash) => { - self.register_execution_results_stored(block_hash); + self.execution_results_stored(block_hash); self.need_next(effect_builder, rng) } // for pre-1.5 blocks we use the legacy deploy fetcher, otherwise we use the deploy @@ -1587,18 +1580,20 @@ impl Component for BlockSynchronizer { // fresh peers to apply (random sample from network) Event::NetworkPeers(block_hash, peers) => { debug!(%block_hash, "BlockSynchronizer: got {} peers from network", peers.len()); - self.register_peers(block_hash, peers); + self.peers_accumulated(block_hash, peers); self.need_next(effect_builder, rng) } // fresh peers to apply (qualified peers from accumulator) Event::AccumulatedPeers(block_hash, Some(peers)) => { debug!(%block_hash, "BlockSynchronizer: got {} peers from accumulator", peers.len()); - self.register_peers(block_hash, peers); + self.peers_accumulated(block_hash, peers); self.need_next(effect_builder, rng) } - // no more peers available, what do we need next? + // no more peers available; periodically retry via need next... + // the node will likely get more peers over time and resume Event::AccumulatedPeers(block_hash, None) => { debug!(%block_hash, "BlockSynchronizer: got 0 peers from accumulator"); + self.peers_accumulated(block_hash, vec![]); self.need_next(effect_builder, rng) } Event::MadeFinalizedBlock { block_hash, result } => { diff --git a/node/src/components/block_synchronizer/block_acquisition.rs b/node/src/components/block_synchronizer/block_acquisition.rs index 7a90a6dc59..ff6445d110 100644 --- a/node/src/components/block_synchronizer/block_acquisition.rs +++ b/node/src/components/block_synchronizer/block_acquisition.rs @@ -320,7 +320,7 @@ impl BlockAcquisitionState { validator_weights: &EraValidatorWeights, rng: &mut NodeRng, is_historical: bool, - max_simultaneous_peers: usize, + max_simultaneous_peers: u8, ) -> Result { // self is the resting state we are in, ret is the next action that should be taken // to acquire the necessary data to get us to the next step (if any), or an error @@ -1352,7 +1352,7 @@ impl BlockAcquisitionState { pub(super) fn signatures_from_missing_validators( validator_weights: &EraValidatorWeights, signatures: &mut SignatureAcquisition, - max_simultaneous_peers: usize, + max_simultaneous_peers: u8, peer_list: &PeerList, rng: &mut NodeRng, block_header: &BlockHeader, @@ -1362,7 +1362,7 @@ pub(super) fn signatures_from_missing_validators( .cloned() .collect(); // If there are too few, retry any in Pending state. 
- if missing_signatures_in_random_order.len() < max_simultaneous_peers { + if (missing_signatures_in_random_order.len() as u8) < max_simultaneous_peers { missing_signatures_in_random_order.extend( validator_weights .missing_validators(signatures.not_pending()) diff --git a/node/src/components/block_synchronizer/block_acquisition_action.rs b/node/src/components/block_synchronizer/block_acquisition_action.rs index e8a4035a5d..d3deef7e64 100644 --- a/node/src/components/block_synchronizer/block_acquisition_action.rs +++ b/node/src/components/block_synchronizer/block_acquisition_action.rs @@ -19,7 +19,7 @@ use crate::{ use super::block_acquisition::signatures_from_missing_validators; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub(crate) struct BlockAcquisitionAction { peers_to_ask: Vec, need_next: NeedNext, @@ -273,7 +273,7 @@ impl BlockAcquisitionAction { signatures: &mut SignatureAcquisition, needs_deploy: Option, is_historical: bool, - max_simultaneous_peers: usize, + max_simultaneous_peers: u8, ) -> Self { match needs_deploy { Some(DeployIdentifier::ById(deploy_id)) => { diff --git a/node/src/components/block_synchronizer/block_builder.rs b/node/src/components/block_synchronizer/block_builder.rs index 951a2be60c..d510fa7b6b 100644 --- a/node/src/components/block_synchronizer/block_builder.rs +++ b/node/src/components/block_synchronizer/block_builder.rs @@ -1,3 +1,4 @@ +mod latch; #[cfg(test)] mod tests; @@ -22,6 +23,7 @@ use super::{ BlockAcquisitionError, }; use crate::{ + components::block_synchronizer::block_builder::latch::Latch, types::{ chainspec::LegacyRequiredFinality, ApprovalsHashes, Block, BlockExecutionResultsOrChunk, BlockHash, BlockHeader, BlockSignatures, Deploy, DeployHash, DeployId, EraValidatorWeights, @@ -82,8 +84,7 @@ pub(super) struct BlockBuilder { sync_start: Instant, execution_progress: ExecutionProgress, last_progress: Timestamp, - in_flight_latch: Option, - latch_reset_interval: TimeDiff, + latch: Latch, // acquired state acquisition_state: BlockAcquisitionState, @@ -107,9 +108,8 @@ impl BlockBuilder { pub(super) fn new( block_hash: BlockHash, should_fetch_execution_state: bool, - max_simultaneous_peers: u32, + max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff, - latch_reset_interval: TimeDiff, legacy_required_finality: LegacyRequiredFinality, strict_finality_protocol_version: ProtocolVersion, ) -> Self { @@ -127,8 +127,7 @@ impl BlockBuilder { sync_start: Instant::now(), execution_progress: ExecutionProgress::Idle, last_progress: Timestamp::now(), - in_flight_latch: None, - latch_reset_interval, + latch: Latch::default(), } } @@ -139,9 +138,8 @@ impl BlockBuilder { validator_weights: EraValidatorWeights, peers: Vec, should_fetch_execution_state: bool, - max_simultaneous_peers: u32, + max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff, - latch_reset_interval: TimeDiff, legacy_required_finality: LegacyRequiredFinality, strict_finality_protocol_version: ProtocolVersion, ) -> Self { @@ -175,8 +173,7 @@ impl BlockBuilder { sync_start: Instant::now(), execution_progress: ExecutionProgress::Idle, last_progress: Timestamp::now(), - in_flight_latch: None, - latch_reset_interval, + latch: Latch::default(), } } @@ -229,23 +226,27 @@ impl BlockBuilder { self.last_progress } - pub(super) fn in_flight_latch(&mut self) -> Option { - if let Some(timestamp) = self.in_flight_latch { - // we put a latch on ourselves the first time we signal we need something specific - // if asked again before we get what we need, and latch_reset_interval has not passed, - // 
we signal we need nothing to avoid spamming redundant asks - // - // if latch_reset_interval has passed, we reset the latch and ask again. + #[cfg(test)] + pub fn latched(&self) -> bool { + self.latch.count() > 0 + } - if Timestamp::now().saturating_diff(timestamp) > self.latch_reset_interval { - self.in_flight_latch = None; - } - } - self.in_flight_latch + pub(super) fn check_latch(&mut self, interval: TimeDiff) -> bool { + self.latch.check_latch(interval, Timestamp::now()) + } + + /// Increments the latch counter by 1. + pub(super) fn latch(&mut self) { + self.latch.increment(1); + } + + pub(super) fn latch_by(&mut self, count: usize) { + self.latch.increment(count as u8); } - pub(super) fn set_in_flight_latch(&mut self) { - self.in_flight_latch = Some(Timestamp::now()); + /// Decrements the latch counter. + pub(super) fn latch_decrement(&mut self) { + self.latch.decrement(1); } pub(super) fn is_failed(&self) -> bool { @@ -387,7 +388,7 @@ impl BlockBuilder { pub(super) fn block_acquisition_action( &mut self, rng: &mut NodeRng, - max_simultaneous_peers: usize, + max_simultaneous_peers: u8, ) -> BlockAcquisitionAction { match self.peer_list.need_peers() { PeersStatus::Sufficient => { @@ -714,7 +715,7 @@ impl BlockBuilder { fn touch(&mut self) { self.last_progress = Timestamp::now(); - self.in_flight_latch = None; + self.latch.unlatch(); } pub(crate) fn peer_list(&self) -> &PeerList { diff --git a/node/src/components/block_synchronizer/block_builder/latch.rs b/node/src/components/block_synchronizer/block_builder/latch.rs new file mode 100644 index 0000000000..37153bbec4 --- /dev/null +++ b/node/src/components/block_synchronizer/block_builder/latch.rs @@ -0,0 +1,63 @@ +use datasize::DataSize; + +use tracing::warn; + +use casper_types::{TimeDiff, Timestamp}; + +#[derive(Debug, Default, DataSize)] +pub(super) struct Latch { + #[data_size(skip)] + latch: u8, + timestamp: Option, +} + +impl Latch { + pub(super) fn increment(&mut self, increment_by: u8) { + match self.latch.checked_add(increment_by) { + Some(val) => { + self.latch = val; + self.touch(); + } + None => { + warn!("latch increment overflowed."); + } + } + } + + pub(super) fn decrement(&mut self, decrement_by: u8) { + match self.latch.checked_sub(decrement_by) { + Some(val) => { + self.latch = val; + } + None => { + self.latch = 0; + } + } + self.touch(); + } + + pub(super) fn unlatch(&mut self) { + self.latch = 0; + self.timestamp = None; + } + + pub(super) fn check_latch(&mut self, interval: TimeDiff, checked: Timestamp) -> bool { + match self.timestamp { + None => false, + Some(timestamp) => { + if checked > timestamp + interval { + self.unlatch() + } + self.count() > 0 + } + } + } + + pub(super) fn count(&self) -> u8 { + self.latch + } + + pub(super) fn touch(&mut self) { + self.timestamp = Some(Timestamp::now()); + } +} diff --git a/node/src/components/block_synchronizer/block_builder/tests.rs b/node/src/components/block_synchronizer/block_builder/tests.rs index 118cebf7a4..cf9c594082 100644 --- a/node/src/components/block_synchronizer/block_builder/tests.rs +++ b/node/src/components/block_synchronizer/block_builder/tests.rs @@ -16,7 +16,6 @@ fn handle_acceptance() { false, 1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); @@ -100,7 +99,6 @@ fn register_era_validator_weights() { false, 1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); @@ -150,7 +148,6 @@ fn register_finalized_block() { false, 
1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); @@ -224,7 +221,6 @@ fn register_block_execution() { false, 1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); @@ -307,7 +303,6 @@ fn register_block_executed() { false, 1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); @@ -376,7 +371,6 @@ fn register_block_marked_complete() { false, 1, TimeDiff::from_seconds(1), - TimeDiff::from_seconds(1), LegacyRequiredFinality::Strict, ProtocolVersion::V1_0_0, ); diff --git a/node/src/components/block_synchronizer/block_synchronizer_progress.rs b/node/src/components/block_synchronizer/block_synchronizer_progress.rs index e17af809c7..0ae91ba1c5 100644 --- a/node/src/components/block_synchronizer/block_synchronizer_progress.rs +++ b/node/src/components/block_synchronizer/block_synchronizer_progress.rs @@ -9,15 +9,12 @@ pub(crate) enum BlockSynchronizerProgress { Syncing(BlockHash, Option, Timestamp), Executing(BlockHash, u64, EraId), Synced(BlockHash, u64, EraId), - Stalled(BlockHash, Option, Timestamp), } impl BlockSynchronizerProgress { pub(crate) fn is_active(&self) -> bool { match self { - BlockSynchronizerProgress::Idle - | BlockSynchronizerProgress::Synced(_, _, _) - | BlockSynchronizerProgress::Stalled(_, _, _) => false, + BlockSynchronizerProgress::Idle | BlockSynchronizerProgress::Synced(_, _, _) => false, BlockSynchronizerProgress::Syncing(_, _, _) | BlockSynchronizerProgress::Executing(_, _, _) => true, } @@ -52,11 +49,6 @@ impl Display for BlockSynchronizerProgress { block_height, block_hash, era_id ) } - BlockSynchronizerProgress::Stalled(block_hash, block_height, timestamp) => { - write!(f, "block synchronizer stalled on ")?; - display_height(f, block_height)?; - write!(f, "{}, {}", timestamp, block_hash) - } } } } diff --git a/node/src/components/block_synchronizer/config.rs b/node/src/components/block_synchronizer/config.rs index 33792cbb94..29ba11dd99 100644 --- a/node/src/components/block_synchronizer/config.rs +++ b/node/src/components/block_synchronizer/config.rs @@ -10,7 +10,6 @@ const DEFAULT_PEER_REFRESH_INTERVAL: &str = "90sec"; const DEFAULT_NEED_NEXT_INTERVAL: &str = "1sec"; const DEFAULT_DISCONNECT_DISHONEST_PEERS_INTERVAL: &str = "10sec"; const DEFAULT_LATCH_RESET_INTERVAL: &str = "5sec"; -const DEFAULT_STALL_LIMIT: &str = "120sec"; /// Configuration options for fetching. #[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] @@ -25,9 +24,6 @@ pub struct Config { pub disconnect_dishonest_peers_interval: TimeDiff, /// Time interval for resetting the latch in block builders. pub latch_reset_interval: TimeDiff, - /// Time interval after which synchronization is considered stalled if no successful sync - /// activity happened. 
- pub stall_limit: TimeDiff, } impl Default for Config { @@ -41,7 +37,6 @@ impl Default for Config { ) .unwrap(), latch_reset_interval: TimeDiff::from_str(DEFAULT_LATCH_RESET_INTERVAL).unwrap(), - stall_limit: TimeDiff::from_str(DEFAULT_STALL_LIMIT).unwrap(), } } } diff --git a/node/src/components/block_synchronizer/need_next.rs b/node/src/components/block_synchronizer/need_next.rs index 6392be4e02..14d57f7f62 100644 --- a/node/src/components/block_synchronizer/need_next.rs +++ b/node/src/components/block_synchronizer/need_next.rs @@ -10,7 +10,7 @@ use crate::types::{ use super::execution_results_acquisition::ExecutionResultsChecksum; -#[derive(DataSize, Debug, Clone, Display)] +#[derive(DataSize, Debug, Clone, Display, PartialEq)] pub(crate) enum NeedNext { #[display(fmt = "need next for {}: nothing", _0)] Nothing(BlockHash), diff --git a/node/src/components/block_synchronizer/peer_list.rs b/node/src/components/block_synchronizer/peer_list.rs index 3ee9d420d4..547af010b7 100644 --- a/node/src/components/block_synchronizer/peer_list.rs +++ b/node/src/components/block_synchronizer/peer_list.rs @@ -30,12 +30,12 @@ pub(super) enum PeersStatus { pub(super) struct PeerList { peer_list: BTreeMap, keep_fresh: Timestamp, - max_simultaneous_peers: u32, + max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff, } impl PeerList { - pub(super) fn new(max_simultaneous_peers: u32, peer_refresh_interval: TimeDiff) -> Self { + pub(super) fn new(max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff) -> Self { PeerList { peer_list: BTreeMap::new(), keep_fresh: Timestamp::now(), diff --git a/node/src/components/block_synchronizer/tests.rs b/node/src/components/block_synchronizer/tests.rs index f590eb27b9..256145cb0b 100644 --- a/node/src/components/block_synchronizer/tests.rs +++ b/node/src/components/block_synchronizer/tests.rs @@ -32,9 +32,8 @@ use crate::{ utils, }; -const MAX_SIMULTANEOUS_PEERS: usize = 5; +const MAX_SIMULTANEOUS_PEERS: u8 = 5; const TEST_LATCH_RESET_INTERVAL_MILLIS: u64 = 5; -const TEST_SYNCHRONIZER_STALL_LIMIT_MILLIS: u64 = 150; const SHOULD_FETCH_EXECUTION_STATE: bool = true; const STRICT_FINALITY_REQUIRED_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 5, 0); @@ -197,10 +196,10 @@ async fn need_next( rng: &mut TestRng, reactor: &MockReactor, block_synchronizer: &mut BlockSynchronizer, - num_expected_events: usize, + num_expected_events: u8, ) -> Vec { let effects = block_synchronizer.need_next(reactor.effect_builder(), rng); - assert_eq!(effects.len(), num_expected_events); + assert_eq!(effects.len() as u8, num_expected_events); reactor.process_effects(effects).await } @@ -267,7 +266,7 @@ impl BlockSynchronizer { let mut block_synchronizer = BlockSynchronizer::new( config, Arc::new(Chainspec::random(rng)), - MAX_SIMULTANEOUS_PEERS as u32, + MAX_SIMULTANEOUS_PEERS, validator_matrix, &prometheus::Registry::new(), ) @@ -300,6 +299,28 @@ fn weak_finality_threshold(n: usize) -> usize { n / 3 + 1 } +fn latch_inner_check(builder: Option<&BlockBuilder>, expected: bool, msg: &str) { + assert_eq!( + builder.expect("builder should exist").latched(), + expected, + "{}", + msg + ); +} + +fn need_next_inner_check( + builder: Option<&mut BlockBuilder>, + rng: &mut TestRng, + expected: NeedNext, + msg: &str, +) { + let need_next = builder + .expect("should exist") + .block_acquisition_action(rng, MAX_SIMULTANEOUS_PEERS) + .need_next(); + assert_eq!(need_next, expected, "{}", msg); +} + #[tokio::test] async fn global_state_sync_wont_stall_with_bad_peers() { let mut rng = 
TestRng::new(); @@ -447,6 +468,24 @@ async fn global_state_sync_wont_stall_with_bad_peers() { #[tokio::test] async fn synchronizer_doesnt_busy_loop_without_peers() { + fn check_need_peer_events(expected_block_hash: BlockHash, events: Vec) { + // Explicitly verify the two effects are indeed asking networking and accumulator for peers. + assert_matches!( + events[0], + MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers { + count, + .. + }) if count == MAX_SIMULTANEOUS_PEERS as usize + ); + assert_matches!( + events[1], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. + }) if block_hash == expected_block_hash + ); + } + let mut rng = TestRng::new(); let mock_reactor = MockReactor::new(); let test_env = TestEnv::random(&mut rng).with_block( @@ -456,6 +495,7 @@ async fn synchronizer_doesnt_busy_loop_without_peers() { .build(&mut rng), ); let block = test_env.block(); + let block_hash = *block.hash(); let validator_matrix = test_env.gen_validator_matrix(); let cfg = Config { latch_reset_interval: TimeDiff::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS), @@ -464,70 +504,84 @@ async fn synchronizer_doesnt_busy_loop_without_peers() { let mut block_synchronizer = BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); - block_synchronizer.register_block_by_hash(*block.hash(), true); + block_synchronizer.register_block_by_hash(block_hash, true); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + false, + "initial set up, should not be latched", + ); + { + // We registered no peers, so we need peers + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::Peers(block_hash), + "should need peers", + ); + // We registered no peers, so the synchronizer should ask for peers. let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), &mut rng, Event::Request(BlockSynchronizerRequest::NeedNext), ); - assert_eq!(effects.len(), 2); - let events = mock_reactor.process_effects(effects).await; - // Given this is a historical builder, ask for peers from the network. - assert_matches!( - events[0], - MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers { - count, - .. - }) if count == MAX_SIMULTANEOUS_PEERS - ); - // Ask for peers from the accumulator. - assert_matches!( - events[1], - MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { - block_hash, - .. - }) if block_hash == *block.hash() + assert_eq!(effects.len(), 2, "we should ask for peers from both networking and accumulator, thus two effects are expected"); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "should be latched waiting for peers", ); + + check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await); } { - // Inject an empty response from the network, simulating no available - // peers. + // Inject an empty response from the network, simulating no available peers. let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), &mut rng, Event::NetworkPeers(*block.hash(), vec![]), ); - // The builder should have its latch set and should not ask for peers - // until the latch clears itself. 
- assert!(effects.is_empty()); - assert!(block_synchronizer - .historical - .as_mut() - .unwrap() - .in_flight_latch() - .is_some()); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "should still be latched because only one response was received and it \ + did not have what we needed.", + ); + + assert!(effects.is_empty(), "effects should be empty"); } { - // Inject an empty response from the accumulator, simulating no - // available peers. + // Inject an empty response from the accumulator, simulating no available peers. + // as this is the second of two responses, the latch clears. the logic then + // calls need next again, we still need peers, so we generate the same two effects again. let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), &mut rng, - Event::AccumulatedPeers(*block.hash(), Some(vec![])), + Event::AccumulatedPeers(*block.hash(), None), + ); + assert!(!effects.is_empty(), "we should still need peers..."); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "we need peers, ask again", + ); + + // We registered no peers, so we still need peers + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::Peers(block_hash), + "should need peers", ); - // The builder should have its latch set and should not ask for peers - // until the latch clears itself. - assert!(effects.is_empty()); - assert!(block_synchronizer - .historical - .as_mut() - .unwrap() - .in_flight_latch() - .is_some()); + + check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await); } } @@ -538,15 +592,17 @@ async fn should_not_stall_after_registering_new_era_validator_weights() { let test_env = TestEnv::random(&mut rng); let peers = test_env.peers(); let block = test_env.block(); + let block_hash = *block.hash(); + let era_id = block.header().era_id(); - // Set up an empty validator matrix. + // Set up a validator matrix. let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); let mut block_synchronizer = BlockSynchronizer::new_initialized(&mut rng, validator_matrix.clone(), Config::default()); // Set up the synchronizer for the test block such that the next step is getting era validators. - block_synchronizer.register_block_by_hash(*block.hash(), true); - block_synchronizer.register_peers(*block.hash(), peers.clone()); + block_synchronizer.register_block_by_hash(block_hash, true); + block_synchronizer.register_peers(block_hash, peers.clone()); block_synchronizer .historical .as_mut() @@ -554,13 +610,39 @@ async fn should_not_stall_after_registering_new_era_validator_weights() { .register_block_header(block.header().clone(), None) .expect("should register block header"); - // At this point, the next step the synchronizer takes should be to get era validators. 
+    latch_inner_check(
+        block_synchronizer.historical.as_ref(),
+        false,
+        "initial set up, should not be latched",
+    );
+    need_next_inner_check(
+        block_synchronizer.historical.as_mut(),
+        &mut rng,
+        NeedNext::EraValidators(era_id),
+        "should need era validators for the era the block is in",
+    );
+
     let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);
     assert_eq!(
         effects.len(),
-        MAX_SIMULTANEOUS_PEERS,
-        "need next should have an effect per peer when needing peers"
+        MAX_SIMULTANEOUS_PEERS as usize,
+        "need next should have an effect per peer when needing sync leap"
     );
+    latch_inner_check(
+        block_synchronizer.historical.as_ref(),
+        true,
+        "after determination that we need validators, should be latched",
+    );
+
+    // `need_next` should return no effects while latched.
+    assert!(
+        block_synchronizer
+            .need_next(mock_reactor.effect_builder(), &mut rng)
+            .is_empty(),
+        "should return no effects while latched"
+    );
+
+    // Bleed off the event queue, checking the expected event kind.
     for effect in effects {
         tokio::spawn(async move { effect.await });
         let event = mock_reactor.crank().await;
@@ -570,32 +652,43 @@ async fn should_not_stall_after_registering_new_era_validator_weights() {
         };
     }

-    // Ensure the in-flight latch has been set, i.e. that `need_next` returns nothing.
-    let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);
-    assert!(
-        effects.is_empty(),
-        "should not have need next while latched"
-    );
-
     // Update the validator matrix to now have an entry for the era of our random block.
     validator_matrix.register_validator_weights(
-        block.header().era_id(),
+        era_id,
         iter::once((ALICE_PUBLIC_KEY.clone(), 100.into())).collect(),
     );

+    // Register the updated validator matrix with the historical builder.
     block_synchronizer
         .historical
         .as_mut()
         .expect("should have historical builder")
         .register_era_validator_weights(&validator_matrix);

+    latch_inner_check(
+        block_synchronizer.historical.as_ref(),
+        false,
+        "after registering validators, should not be latched",
+    );
+
+    need_next_inner_check(
+        block_synchronizer.historical.as_mut(),
+        &mut rng,
+        NeedNext::FinalitySignatures(block_hash, era_id, validator_matrix.public_keys(&era_id)),
+        "should need finality sigs",
+    );
+
     // Ensure the in-flight latch has been released, i.e. that `need_next` returns something.
     let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng);
     assert_eq!(
         effects.len(),
         1,
-        "need next should produce 1 effect now that we have peers and the latch is removed"
+        "need next should produce 1 effect because we currently need exactly 1 signature. \
+        NOTE: finality signatures are a special case; we currently fan out 1 peer per signature \
+        but do multiple rounds of this against increasingly strict weight thresholds. \
+        All other fetchers fan out by asking each of MAX_SIMULTANEOUS_PEERS for the _same_ item."
     );
+
     tokio::spawn(async move { effects.remove(0).await });
     let event = mock_reactor.crank().await;
     assert_matches!(
@@ -664,7 +757,7 @@ async fn historical_sync_gets_peers_form_both_connected_peers_and_accumulator()
         MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers {
             count,
             ..
-        }) if count == MAX_SIMULTANEOUS_PEERS
+        }) if count == MAX_SIMULTANEOUS_PEERS as usize
     );

     assert_matches!(
@@ -754,19 +847,25 @@ async fn fwd_sync_is_not_blocked_by_failed_header_fetch_within_latch_interval()
     let mock_reactor = MockReactor::new();
     let test_env = TestEnv::random(&mut rng);
     let block = test_env.block();
+    let block_hash = *block.hash();
     let peers = test_env.peers();
     let validator_matrix = test_env.gen_validator_matrix();
     let cfg = Config {
-        stall_limit: TimeDiff::from_millis(TEST_SYNCHRONIZER_STALL_LIMIT_MILLIS),
         ..Default::default()
     };
     let mut block_synchronizer =
         BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg);

     // Register block for fwd sync
-    assert!(block_synchronizer.register_block_by_hash(*block.hash(), false));
-    assert!(block_synchronizer.forward.is_some());
-    block_synchronizer.register_peers(*block.hash(), peers.clone());
+    assert!(
+        block_synchronizer.register_block_by_hash(block_hash, false),
+        "should register block by hash"
+    );
+    assert!(
+        block_synchronizer.forward.is_some(),
+        "should have forward sync"
+    );
+    block_synchronizer.register_peers(block_hash, peers.clone());

     let events = need_next(
         &mut rng,
@@ -776,6 +875,18 @@ async fn fwd_sync_is_not_blocked_by_failed_header_fetch_within_latch_interval()
     )
     .await;

+    let initial_progress = block_synchronizer
+        .forward
+        .as_ref()
+        .expect("should exist")
+        .last_progress_time();
+
+    latch_inner_check(
+        block_synchronizer.forward.as_ref(),
+        true,
+        "forward builder should be latched after need next call",
+    );
+
     let mut peers_asked = Vec::new();
     for event in events {
         assert_matches!(
             event,
             MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest {
                 id,
                 peer,
                 ..
-            }) if peers.contains(&peer) && id == *block.hash() => {
-                peers_asked.push(peer);
-            }
+            }) if peers.contains(&peer) && id == block_hash => {
+                peers_asked.push(peer);
+            },
+            "should be block header fetch"
         );
     }

     // Simulate fetch errors for the header
     let mut generated_effects = Effects::new();
     for peer in peers_asked {
+        latch_inner_check(
+            block_synchronizer.forward.as_ref(),
+            true,
+            &format!("response from peer: {:?}, but should still be latched until after final response received", peer),
+        );
+        assert!(
+            generated_effects.is_empty(),
+            "effects should remain empty until last response"
+        );
         let effects = block_synchronizer.handle_event(
             mock_reactor.effect_builder(),
             &mut rng,
@@ -807,25 +928,47 @@ async fn fwd_sync_is_not_blocked_by_failed_header_fetch_within_latch_interval()
         generated_effects.extend(effects);
     }

+    need_next_inner_check(
+        block_synchronizer.forward.as_mut(),
+        &mut rng,
+        NeedNext::BlockHeader(block_hash),
+        "should need block header",
+    );
+    assert!(
+        !generated_effects.is_empty(),
+        "should have gotten effects after the final response tail called into need next"
+    );
+
+    latch_inner_check(
+        block_synchronizer.forward.as_ref(),
+        true,
+        "all requests have been responded to, and the last event response should have \
+        resulted in a fresh need next being reported and thus a new latch",
+    );
+
     assert_matches!(
         block_synchronizer.forward_progress(),
-        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()
+        BlockSynchronizerProgress::Syncing(hash, _, _) if hash == block_hash,
+        "should be syncing"
     );

-    // The effects are empty at this point and the synchronizer is stuck
-    assert!(generated_effects.is_empty());
-
-    // Wait for the stall detection time to pass
-    tokio::time::sleep(Duration::from_millis(
-        TEST_SYNCHRONIZER_STALL_LIMIT_MILLIS * 2,
-    ))
.await; + tokio::time::sleep(Duration::from(cfg.need_next_interval)).await; - // Check if the forward builder is reported as stalled so that the control logic can recover assert_matches!( block_synchronizer.forward_progress(), - BlockSynchronizerProgress::Stalled(block_hash, _, _) if block_hash == *block.hash() + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() ); + + let current_progress = block_synchronizer + .forward + .as_ref() + .expect("should exist") + .last_progress_time(); + + assert_eq!( + initial_progress, current_progress, + "we have not gotten the record we need, so progress should remain the same" + ) } #[tokio::test] @@ -891,7 +1034,10 @@ async fn registering_header_successfully_triggers_signatures_fetch_for_weak_fina // need to get more signatures to reach weak finality. assert_eq!( effects.len(), - min(test_env.validator_keys().len(), MAX_SIMULTANEOUS_PEERS) + min( + test_env.validator_keys().len(), + MAX_SIMULTANEOUS_PEERS as usize + ) ); for event in mock_reactor.process_effects(effects).await { assert_matches!( @@ -959,7 +1105,10 @@ async fn fwd_more_signatures_are_requested_if_weak_finality_is_not_reached() { // The peer limit should still be in place. assert_eq!( effects.len(), - min(validators_secret_keys.len() - 1, MAX_SIMULTANEOUS_PEERS) + min( + validators_secret_keys.len() - 1, + MAX_SIMULTANEOUS_PEERS as usize + ) ); for event in mock_reactor.process_effects(effects).await { assert_matches!( @@ -1011,7 +1160,7 @@ async fn fwd_more_signatures_are_requested_if_weak_finality_is_not_reached() { generated_effects .into_iter() .rev() - .take(MAX_SIMULTANEOUS_PEERS), + .take(MAX_SIMULTANEOUS_PEERS as usize), ) .await; for event in events { @@ -1036,19 +1185,20 @@ async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interva let test_env = TestEnv::random(&mut rng); let peers = test_env.peers(); let block = test_env.block(); + let expected_block_hash = *block.hash(); + let era_id = block.header().era_id(); let validator_matrix = test_env.gen_validator_matrix(); - let num_validators = test_env.validator_keys().len(); + let num_validators = test_env.validator_keys().len() as u8; let cfg = Config { - stall_limit: TimeDiff::from_millis(TEST_SYNCHRONIZER_STALL_LIMIT_MILLIS), ..Default::default() }; let mut block_synchronizer = BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); // Register block for fwd sync - assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.register_block_by_hash(expected_block_hash, false)); assert!(block_synchronizer.forward.is_some()); - block_synchronizer.register_peers(*block.hash(), peers.clone()); + block_synchronizer.register_peers(expected_block_hash, peers.clone()); let fwd_builder = block_synchronizer .forward .as_mut() @@ -1061,7 +1211,7 @@ async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interva // Check the block acquisition state assert_matches!( fwd_builder.block_acquisition_state(), - BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == *block.hash() + BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == expected_block_hash ); // Synchronizer should fetch finality signatures @@ -1069,10 +1219,11 @@ async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interva &mut rng, &mock_reactor, &mut block_synchronizer, - min(num_validators, MAX_SIMULTANEOUS_PEERS), /* We have num_validators - * validators so we - * require the num_validators 
-                                                  * signatures */
+        min(num_validators, MAX_SIMULTANEOUS_PEERS),
+        /* We have num_validators
+         * validators so we
+         * require the num_validators
+         * signatures */
     )
     .await;

@@ -1087,8 +1238,8 @@ async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interva
                 ..
             }) => {
                 assert!(peers.contains(&peer));
-                assert_eq!(id.block_hash, *block.hash());
-                assert_eq!(id.era_id, block.header().era_id());
+                assert_eq!(id.block_hash, expected_block_hash);
+                assert_eq!(id.era_id, era_id);
                 sigs_requested.push((peer, id.public_key));
             }
         );
     }

     // Simulate failed fetch of finality signatures
     let mut generated_effects = Effects::new();
     for (peer, public_key) in sigs_requested {
+        latch_inner_check(
+            block_synchronizer.forward.as_ref(),
+            true,
+            &format!("response from peer: {:?}, but should still be latched until after final response received", peer),
+        );
+        assert!(
+            generated_effects.is_empty(),
+            "effects should remain empty until last response"
+        );
         let effects = block_synchronizer.handle_event(
             mock_reactor.effect_builder(),
             &mut rng,
             Event::FinalitySignatureFetched(Err(FetcherError::Absent {
                 id: Box::new(Box::new(FinalitySignatureId {
-                    block_hash: *block.hash(),
-                    era_id: block.header().era_id(),
+                    block_hash: expected_block_hash,
+                    era_id,
                     public_key,
                 })),
                 peer,
@@ -1116,22 +1276,38 @@ async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interva
     assert_matches!(
         block_synchronizer.forward_progress(),
-        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash()
+        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash,
+        "should be syncing"
     );

-    // The effects are empty at this point and the synchronizer is stuck
-    assert!(generated_effects.is_empty());
+    // All peers have responded with failures; the final response should have triggered a
+    // fresh need next, so by now the synchronizer should have generated new fetch effects.
+    assert!(
+        !generated_effects.is_empty(),
+        "should have gotten effects after the final response tail called into need next"
+    );

-    // Wait for the stall detection time to pass
-    tokio::time::sleep(Duration::from_millis(
-        TEST_SYNCHRONIZER_STALL_LIMIT_MILLIS * 2,
-    ))
-    .await;
+    latch_inner_check(
+        block_synchronizer.forward.as_ref(),
+        true,
+        "all requests have been responded to, and the last event response should have \
+        resulted in a fresh need next being reported and thus a new latch",
+    );
+
+    for event in mock_reactor.process_effects(generated_effects).await {
+        assert_matches!(
+            event,
+            MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest {
+                id,
+                peer,
+                ..
+            }) if peers.contains(&peer) && id.block_hash == expected_block_hash && id.era_id == block.header().era_id()
+        );
+    }

-    // Check if the forward builder is reported as stalled so that the control logic can recover
+    // The `Stalled` variant no longer exists; the synchronizer should still report as
+    // syncing, with recovery driven by the latch and progress timestamps instead.
     assert_matches!(
         block_synchronizer.forward_progress(),
-        BlockSynchronizerProgress::Stalled(block_hash, _, _) if block_hash == *block.hash()
+        BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash
     );
 }

@@ -1477,7 +1653,7 @@ async fn fwd_registering_approvals_hashes_triggers_fetch_for_deploys() {
             peer: peers[0],
         })),
     );
-    assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS);
+    assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize);
     for event in mock_reactor.process_effects(effects).await {
         assert_matches!(
             event,
diff --git a/node/src/reactor/main_reactor.rs b/node/src/reactor/main_reactor.rs
index f2a311297a..55f25dcaa2 100644
--- a/node/src/reactor/main_reactor.rs
+++ b/node/src/reactor/main_reactor.rs
@@ -269,7 +269,7 @@ impl reactor::Reactor for MainReactor {
                         // era by enqueuing all finalized blocks starting from the
                         // first one in that era, blocks which should have already
                         // been executed and marked complete in storage.
-                        error!(
+                        warn!(
                             block_height,
                             "Finalized block enqueued for execution, but a complete \
                             block header with the same height is not present in storage."
diff --git a/node/src/reactor/main_reactor/catch_up.rs b/node/src/reactor/main_reactor/catch_up.rs
index 952b82c033..5c12469998 100644
--- a/node/src/reactor/main_reactor/catch_up.rs
+++ b/node/src/reactor/main_reactor/catch_up.rs
@@ -85,8 +85,7 @@ impl MainReactor {
                     // this code path should be unreachable because we're not
                     // supposed to enqueue historical blocks for execution.
                     Either::Right(CatchUpInstruction::Fatal(format!(
-                        "CatchUp: block synchronizer attempted to execute \
-                        block: {}",
+                        "CatchUp: block synchronizer attempted to execute block: {}",
                         block_hash
                     )))
                 }
@@ -95,19 +94,6 @@ impl MainReactor {
                 // effects, any referenced deploys, & sufficient finality (by weight) of signatures
                 SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id),
             ),
-            BlockSynchronizerProgress::Stalled(block_hash, _, last_progress_time) => {
-                // working on syncing a block
-                warn!(
-                    %block_hash,
-                    %last_progress_time,
-                    "CatchUp: block synchronizer stalled while syncing block; purging historical builder"
-                );
-                self.block_synchronizer.purge_historical();
-                match self.trusted_hash {
-                    Some(trusted_hash) => self.catch_up_trusted_hash(trusted_hash),
-                    None => self.catch_up_no_trusted_hash(),
-                }
-            }
         }
     }
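With the `Stalled` progress variant removed here and in the hunks that follow, a wedged builder is no longer a distinct state; the tests above instead snapshot and compare `last_progress_time`. A minimal editorial sketch of that polling pattern (simplified types; the node uses `Timestamp` and `BlockSynchronizerProgress` rather than `std::time`):

use std::time::{Duration, Instant};

struct Builder {
    last_progress: Instant,
}

impl Builder {
    fn last_progress_time(&self) -> Instant {
        self.last_progress
    }
}

// No dedicated Stalled variant: the control logic simply observes that the
// progress timestamp has not advanced for longer than the allowed limit.
fn is_stalled(builder: &Builder, stall_limit: Duration, now: Instant) -> bool {
    now.duration_since(builder.last_progress_time()) > stall_limit
}

fn main() {
    let builder = Builder { last_progress: Instant::now() };
    assert!(!is_stalled(&builder, Duration::from_secs(30), Instant::now()));
}

diff --git a/node/src/reactor/main_reactor/keep_up.rs b/node/src/reactor/main_reactor/keep_up.rs
index 63d3e0f7ff..d591925f14 100644
--- a/node/src/reactor/main_reactor/keep_up.rs
+++ b/node/src/reactor/main_reactor/keep_up.rs
@@ -170,16 +170,6 @@ impl MainReactor {
                 // execution effects.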
                 Either::Left(self.keep_up_synced(block_hash, block_height, era_id))
             }
-            BlockSynchronizerProgress::Stalled(block_hash, _, last_progress_time) => {
-                // working on syncing a block
-                warn!(
-                    %block_hash,
-                    %last_progress_time,
-                    "KeepUp: block synchronizer stalled while syncing block; purging forward builder"
-                );
-                self.block_synchronizer.purge_forward();
-                self.keep_up_idle()
-            }
         }
     }

@@ -595,14 +585,6 @@ impl MainReactor {
                 debug!("KeepUp: still syncing historical block");
                 return Ok(Some(SyncBackInstruction::Syncing));
             }
-            BlockSynchronizerProgress::Stalled(block_hash, _, last_progress_time) => {
-                warn!(
-                    %block_hash,
-                    %last_progress_time,
-                    "KeepUp: block synchronizer stalled while syncing historical block; purging historical builder"
-                );
-                self.block_synchronizer.purge_historical();
-            }
             BlockSynchronizerProgress::Executing(block_hash, height, _) => {
                 warn!(
                     %block_hash,
diff --git a/node/src/reactor/main_reactor/validate.rs b/node/src/reactor/main_reactor/validate.rs
index b48ec2107a..ee56f941f1 100644
--- a/node/src/reactor/main_reactor/validate.rs
+++ b/node/src/reactor/main_reactor/validate.rs
@@ -50,6 +50,7 @@ impl MainReactor {
             highest_complete_block.height(),
             highest_complete_block.header().era_id(),
         );
+
         if let SyncInstruction::Leap { .. } =
             self.block_accumulator.sync_instruction(sync_identifier)
         {
diff --git a/node/src/types/chainspec/core_config.rs b/node/src/types/chainspec/core_config.rs
index 78d6f1ac68..2af9792bef 100644
--- a/node/src/types/chainspec/core_config.rs
+++ b/node/src/types/chainspec/core_config.rs
@@ -83,7 +83,7 @@ pub struct CoreConfig {
     pub strict_argument_checking: bool,

     /// How many peers to simultaneously ask when sync leaping.
-    pub simultaneous_peer_requests: u32,
+    pub simultaneous_peer_requests: u8,

     /// Which consensus protocol to use.
     pub consensus_protocol: ConsensusProtocolName,
@@ -272,7 +272,7 @@ impl FromBytes for CoreConfig {
         let (minimum_delegation_amount, remainder) = u64::from_bytes(remainder)?;
         let (prune_batch_size, remainder) = u64::from_bytes(remainder)?;
         let (strict_argument_checking, remainder) = bool::from_bytes(remainder)?;
-        let (simultaneous_peer_requests, remainder) = u32::from_bytes(remainder)?;
+        let (simultaneous_peer_requests, remainder) = u8::from_bytes(remainder)?;
         let (consensus_protocol, remainder) = ConsensusProtocolName::from_bytes(remainder)?;
         let (max_delegators_per_validator, remainder) = FromBytes::from_bytes(remainder)?;
         let config = CoreConfig {
diff --git a/node/src/types/validator_matrix.rs b/node/src/types/validator_matrix.rs
index 1bd3931245..d4cb23c59e 100644
--- a/node/src/types/validator_matrix.rs
+++ b/node/src/types/validator_matrix.rs
@@ -98,6 +98,17 @@ impl ValidatorMatrix {
         }
     }

+    #[cfg(test)]
+    pub(crate) fn public_keys(&self, era_id: &EraId) -> Vec<PublicKey> {
+        let mut ret = vec![];
+        if let Some(evw) = self.read_inner().get(era_id) {
+            for validator_public_key in evw.validator_public_keys() {
+                ret.push(validator_public_key.clone());
+            }
+        }
+        ret
+    }
+
     // Register the era of the highest orphaned block.
     pub(crate) fn register_retrograde_latch(&mut self, latch_era: Option<EraId>) {
         self.retrograde_latch = latch_era;
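Narrowing `simultaneous_peer_requests` from u32 to u8 above changes the chainspec's binary layout, because `FromBytes` decodes the config fields positionally with fixed widths. A self-contained editorial sketch (simplified; the node actually uses `casper_types::bytesrepr`) of why reader and writer must agree on every field's width:

fn take(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])> {
    (bytes.len() >= n).then(|| bytes.split_at(n))
}

fn u8_from_bytes(bytes: &[u8]) -> Option<(u8, &[u8])> {
    let (head, rest) = take(bytes, 1)?;
    Some((head[0], rest))
}

fn u32_from_bytes(bytes: &[u8]) -> Option<(u32, &[u8])> {
    let (head, rest) = take(bytes, 4)?;
    Some((u32::from_le_bytes(head.try_into().ok()?), rest))
}

fn main() {
    // A config that encoded the field as a u32 (little endian) ...
    let old_encoding = 5u32.to_le_bytes();

    // ... leaves stray bytes behind if the reader now expects a u8: only one
    // byte is consumed, and the remaining three corrupt every later field.
    let (value, remainder) = u8_from_bytes(&old_encoding).unwrap();
    assert_eq!(value, 5);
    assert_eq!(remainder.len(), 3);

    // A matching u32 reader consumes the encoding exactly.
    let (value, remainder) = u32_from_bytes(&old_encoding).unwrap();
    assert_eq!(value, 5);
    assert!(remainder.is_empty());
}

From e6a0857f16c81df22100536523019084aa8b7b2d Mon Sep 17 00:00:00 2001
From: "casperlabs-bors-ng[bot]"
 <82463608+casperlabs-bors-ng[bot]@users.noreply.github.com>
Date: Mon, 28 Aug 2023 15:09:05 +0000
Subject: [PATCH 08/41] Merge #4205

4205: Attempt to fix block synchronizer request storm by tweaking the latching.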
r=alsrdn a=alsrdn

Attempt to fix the block synchronizer request storm by tweaking the latching.

Don't decrement the latch on responses to old requests that are no longer needed (i.e. responses received in a state that expects other data). For example, if the synchronizer is in the `HaveWeakFinality` state, it will try to fetch the block body from some peers, and the latch will be incremented by `num_peers`. If a late finality signature response comes in while in this state, it will decrement the latch. If multiple late responses come in, the latch can reach 0, and the synchronizer would try to fetch the block body again even though no responses to the initial requests have been received yet.

Also added a test for this. It's a bit ugly and copy-pasty, but I think it showcases the issue.

Attempted fix for: https://github.com/casper-network/casper-node/issues/4202

Co-authored-by: Alexandru Sardan
---
 node/src/components/block_synchronizer.rs     |  44 +-
 .../block_synchronizer/block_acquisition.rs   |  70 +-
 .../block_synchronizer/block_builder.rs       | 167 ++-
 .../block_synchronizer/block_builder/tests.rs |  88 +-
 .../components/block_synchronizer/tests.rs    | 979 +++++++++++++++++-
 node/src/types/block.rs                       |  25 +-
 6 files changed, 1311 insertions(+), 62 deletions(-)

diff --git a/node/src/components/block_synchronizer.rs b/node/src/components/block_synchronizer.rs
index 659a2d9af5..008415cfd6 100644
--- a/node/src/components/block_synchronizer.rs
+++ b/node/src/components/block_synchronizer.rs
@@ -807,12 +807,16 @@ impl BlockSynchronizer {
             };

         let validator_matrix = &self.validator_matrix.clone();
-        if let Some(builder) = self.get_builder(block_hash, true) {
+        if let Some(builder) = self.get_builder(block_hash, false) {
             match maybe_block_header {
                 None => {
                     if let Some(peer_id) = maybe_peer_id {
                         builder.demote_peer(peer_id);
                     }
+
+                    if builder.waiting_for_block_header() {
+                        builder.latch_decrement();
+                    }
                 }
                 Some(block_header) => {
                     if let Err(error) =
                         builder.register_block_header(*block_header, maybe_peer_id)
@@ -851,12 +855,16 @@ impl BlockSynchronizer {
             }
         };

-        if let Some(builder) = self.get_builder(block_hash, true) {
+        if let Some(builder) = self.get_builder(block_hash, false) {
             match maybe_block {
                 None => {
                     if let Some(peer_id) = maybe_peer_id {
                         builder.demote_peer(peer_id);
                     }
+
+                    if builder.waiting_for_block() {
+                        builder.latch_decrement();
+                    }
                 }
                 Some(block) => {
                     if let Err(error) = builder.register_block(&block, maybe_peer_id) {
@@ -895,12 +903,16 @@ impl BlockSynchronizer {
             }
         };

-        if let Some(builder) = self.get_builder(block_hash, true) {
+        if let Some(builder) = self.get_builder(block_hash, false) {
             match maybe_approvals_hashes {
                 None => {
                     if let Some(peer_id) = maybe_peer_id {
                         builder.demote_peer(peer_id);
                     }
+
+                    if builder.waiting_for_approvals_hashes() {
+                        builder.latch_decrement();
+                    }
                 }
                 Some(approvals_hashes) => {
                     if let Err(error) =
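Before the finality-signature handler below, here is a compact editorial sketch of the latch arithmetic this patch fixes. It is an illustration of the mechanism described in the commit message only, not code from the node; the `Awaiting` enum and both `on_response_*` methods are hypothetical simplifications of the builder's state machine:

// A builder latches once per in-flight request; if *any* response decrements
// the count, late responses for items we no longer need can drive it to zero
// and trigger a premature re-fetch (the "request storm").

#[derive(PartialEq)]
enum Awaiting {
    FinalitySignatures,
    BlockBody,
}

struct Latch {
    count: u8,
}

impl Latch {
    // Naive scheme: every response unlatches, even stale ones.
    fn on_response_naive(&mut self) {
        self.count = self.count.saturating_sub(1);
    }

    // Patched scheme: only unlatch if the response is for the kind of item
    // the builder is currently waiting for.
    fn on_response_guarded(&mut self, awaiting: &Awaiting, response_for: &Awaiting) {
        if awaiting == response_for {
            self.count = self.count.saturating_sub(1);
        }
    }
}

fn main() {
    // HaveWeakFinality: we just asked `num_peers` peers for the block body.
    let num_peers = 5;
    let mut naive = Latch { count: num_peers };
    let mut guarded = Latch { count: num_peers };

    // Five *late* finality-signature responses arrive before any body response.
    for _ in 0..5 {
        naive.on_response_naive();
        guarded.on_response_guarded(&Awaiting::BlockBody, &Awaiting::FinalitySignatures);
    }

    assert_eq!(naive.count, 0); // storm: need-next fires and re-requests the body
    assert_eq!(guarded.count, 5); // still latched, waiting for the real responses
}

@@ -936,12 +948,18 @@ impl BlockSynchronizer {
             }
         };

-        if let Some(builder) = self.get_builder(id.block_hash, true) {
+        if let Some(builder) = self.get_builder(id.block_hash, false) {
             match maybe_finality_signature {
                 None => {
                     if let Some(peer_id) = maybe_peer_id {
                         builder.demote_peer(peer_id);
                     }
+
+                    // Failed to fetch a finality sig. Decrement the latch if we were actually
+                    // waiting for signatures.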
+ if builder.waiting_for_signatures() { + builder.latch_decrement(); + } } Some(finality_signature) => { if let Err(error) = @@ -1141,13 +1159,15 @@ impl BlockSynchronizer { debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); return Effects::new(); } - builder.latch_decrement(); match maybe_value_or_chunk { None => { debug!(%block_hash, "execution_results_fetched: No maybe_value_or_chunk"); if let Some(peer_id) = maybe_peer_id { builder.demote_peer(peer_id); } + if builder.waiting_for_execution_results() { + builder.latch_decrement(); + } } Some(value_or_chunk) => { // due to reasons, the stitched back together execution effects need to be saved @@ -1198,7 +1218,7 @@ impl BlockSynchronizer { FetchedData::FromStorage { item } => (item, None), }; - if let Some(builder) = self.get_builder(block_hash, true) { + if let Some(builder) = self.get_builder(block_hash, false) { if let Err(error) = builder.register_deploy(deploy.fetch_id(), maybe_peer) { error!(%block_hash, %error, "BlockSynchronizer: failed to apply deploy"); } @@ -1569,9 +1589,21 @@ impl Component for BlockSynchronizer { self.deploy_fetched(block_hash, fetched_deploy) } Either::Left(Err(error)) => { + if let Some(builder) = self.get_builder(block_hash, false) { + if builder.waiting_for_deploys() { + builder.latch_decrement(); + } + } + debug!(%error, "BlockSynchronizer: failed to fetch legacy deploy"); } Either::Right(Err(error)) => { + if let Some(builder) = self.get_builder(block_hash, false) { + if builder.waiting_for_deploys() { + builder.latch_decrement(); + } + } + debug!(%error, "BlockSynchronizer: failed to fetch deploy"); } }; diff --git a/node/src/components/block_synchronizer/block_acquisition.rs b/node/src/components/block_synchronizer/block_acquisition.rs index ff6445d110..199df8a5ed 100644 --- a/node/src/components/block_synchronizer/block_acquisition.rs +++ b/node/src/components/block_synchronizer/block_acquisition.rs @@ -668,6 +668,53 @@ impl BlockAcquisitionState { }; } + pub(super) fn actively_acquiring_signatures(&self, is_historical: bool) -> bool { + match self { + BlockAcquisitionState::HaveBlockHeader(..) => true, + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + BlockAcquisitionState::HaveBlock(_, acquired_signatures, acquired_deploys) => { + !is_historical + && acquired_deploys.needs_deploy().is_none() + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveGlobalState( + _, + acquired_signatures, + acquired_deploys, + .., + ) => { + acquired_deploys.needs_deploy().is_none() + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveApprovalsHashes( + _, + acquired_signatures, + acquired_deploys, + ) => { + acquired_deploys.needs_deploy().is_none() + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveAllExecutionResults( + _, + acquired_signatures, + acquired_deploys, + .., + ) => { + acquired_signatures.is_legacy() + && acquired_deploys.needs_deploy().is_none() + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveAllDeploys(_, acquired_signatures) => { + acquired_signatures.signature_weight() != SignatureWeight::Strict + } + } + } + /// Register a finality signature for this block. pub(super) fn register_finality_signature( &mut self, @@ -682,7 +729,7 @@ impl BlockAcquisitionState { let signer = signature.public_key.clone(); let acceptance: Acceptance; let maybe_block_hash: Option; - let currently_acquiring_sigs: bool; + let currently_acquiring_sigs = self.actively_acquiring_signatures(is_historical); let maybe_new_state: Option = match self { BlockAcquisitionState::HaveBlockHeader(header, acquired_signatures) => { // we are attempting to acquire at least ~1/3 signature weight before @@ -692,7 +739,6 @@ impl BlockAcquisitionState { // signature. maybe_block_hash = Some(header.block_hash()); acceptance = acquired_signatures.apply_signature(signature, validator_weights); - currently_acquiring_sigs = true; if acquired_signatures.has_sufficient_finality(is_historical, false) { Some(BlockAcquisitionState::HaveWeakFinalitySignatures( header.clone(), @@ -704,9 +750,6 @@ impl BlockAcquisitionState { } BlockAcquisitionState::HaveBlock(block, acquired_signatures, acquired_deploys) => { maybe_block_hash = Some(*block.hash()); - currently_acquiring_sigs = !is_historical - && acquired_deploys.needs_deploy().is_none() - && acquired_signatures.signature_weight() != SignatureWeight::Strict; acceptance = acquired_signatures.apply_signature(signature, validator_weights); if !is_historical && acquired_deploys.needs_deploy().is_none() @@ -730,8 +773,6 @@ impl BlockAcquisitionState { .., ) => { maybe_block_hash = Some(*block.hash()); - currently_acquiring_sigs = acquired_deploys.needs_deploy().is_none() - && acquired_signatures.signature_weight() != SignatureWeight::Strict; acceptance = acquired_signatures.apply_signature(signature, validator_weights); if acquired_deploys.needs_deploy().is_none() && acquired_signatures.has_sufficient_finality(is_historical, true) @@ -744,14 +785,8 @@ impl BlockAcquisitionState { None } } - BlockAcquisitionState::HaveApprovalsHashes( - block, - acquired_signatures, - acquired_deploys, - ) => { + BlockAcquisitionState::HaveApprovalsHashes(block, acquired_signatures, ..) 
=> { maybe_block_hash = Some(*block.hash()); - currently_acquiring_sigs = acquired_deploys.needs_deploy().is_none() - && acquired_signatures.signature_weight() != SignatureWeight::Strict; acceptance = acquired_signatures.apply_signature(signature, validator_weights); None } @@ -762,9 +797,6 @@ impl BlockAcquisitionState { .., ) => { maybe_block_hash = Some(*block.hash()); - currently_acquiring_sigs = acquired_signatures.is_legacy() - && acquired_deploys.needs_deploy().is_none() - && acquired_signatures.signature_weight() != SignatureWeight::Strict; acceptance = acquired_signatures.apply_signature(signature, validator_weights); if acquired_signatures.is_legacy() && acquired_deploys.needs_deploy().is_none() @@ -780,8 +812,6 @@ impl BlockAcquisitionState { } BlockAcquisitionState::HaveAllDeploys(block, acquired_signatures) => { maybe_block_hash = Some(*block.hash()); - currently_acquiring_sigs = - acquired_signatures.signature_weight() != SignatureWeight::Strict; acceptance = acquired_signatures.apply_signature(signature, validator_weights); if acquired_signatures.has_sufficient_finality(is_historical, true) { Some(BlockAcquisitionState::HaveStrictFinalitySignatures( @@ -795,7 +825,6 @@ impl BlockAcquisitionState { BlockAcquisitionState::HaveStrictFinalitySignatures(block, acquired_signatures) => { maybe_block_hash = Some(*block.hash()); acceptance = acquired_signatures.apply_signature(signature, validator_weights); - currently_acquiring_sigs = false; None } BlockAcquisitionState::HaveWeakFinalitySignatures(header, acquired_signatures) => { @@ -805,7 +834,6 @@ impl BlockAcquisitionState { // will accept late comers while resting in this state maybe_block_hash = Some(header.block_hash()); acceptance = acquired_signatures.apply_signature(signature, validator_weights); - currently_acquiring_sigs = false; None } BlockAcquisitionState::Initialized(..) diff --git a/node/src/components/block_synchronizer/block_builder.rs b/node/src/components/block_synchronizer/block_builder.rs index d510fa7b6b..8923991787 100644 --- a/node/src/components/block_synchronizer/block_builder.rs +++ b/node/src/components/block_synchronizer/block_builder.rs @@ -231,6 +231,11 @@ impl BlockBuilder { self.latch.count() > 0 } + #[cfg(test)] + pub fn latch_count(&self) -> u8 { + self.latch.count() + } + pub(super) fn check_latch(&mut self, interval: TimeDiff) -> bool { self.latch.check_latch(interval, Timestamp::now()) } @@ -456,31 +461,91 @@ impl BlockBuilder { } } + pub(super) fn waiting_for_block_header(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::Initialized(..) => true, + BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } + } + pub(super) fn register_block_header( &mut self, block_header: BlockHeader, maybe_peer: Option, ) -> Result<(), Error> { + let was_waiting_for_block_header = self.waiting_for_block_header(); + let era_id = block_header.era_id(); let acceptance = self.acquisition_state.register_block_header( block_header, self.strict_finality_protocol_version, self.should_fetch_execution_state, ); - self.handle_acceptance(maybe_peer, acceptance)?; + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block_header)?; self.era_id = Some(era_id); Ok(()) } + pub(super) fn waiting_for_block(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveWeakFinalitySignatures(..) => true, + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => false, + } + } + pub(super) fn register_block( &mut self, block: &Block, maybe_peer: Option, ) -> Result<(), Error> { + let was_waiting_for_block = self.waiting_for_block(); let acceptance = self .acquisition_state .register_block(block, self.should_fetch_execution_state); - self.handle_acceptance(maybe_peer, acceptance) + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block) + } + + pub(super) fn waiting_for_approvals_hashes(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveBlock(..) if !self.should_fetch_execution_state => true, + BlockAcquisitionState::HaveAllExecutionResults(..) + if self.should_fetch_execution_state => + { + true + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } } pub(super) fn register_approvals_hashes( @@ -488,10 +553,11 @@ impl BlockBuilder { approvals_hashes: &ApprovalsHashes, maybe_peer: Option, ) -> Result<(), Error> { + let was_waiting_for_approvals_hashes = self.waiting_for_approvals_hashes(); let acceptance = self .acquisition_state .register_approvals_hashes(approvals_hashes, self.should_fetch_execution_state); - self.handle_acceptance(maybe_peer, acceptance) + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_approvals_hashes) } pub(super) fn register_finality_signature_pending(&mut self, validator: PublicKey) { @@ -518,11 +584,17 @@ impl BlockBuilder { } } + pub(super) fn waiting_for_signatures(&self) -> bool { + self.acquisition_state + .actively_acquiring_signatures(self.should_fetch_execution_state) + } + pub(super) fn register_finality_signature( &mut self, finality_signature: FinalitySignature, maybe_peer: Option, ) -> Result<(), Error> { + let was_waiting_for_sigs = self.waiting_for_signatures(); let validator_weights = self .validator_weights .as_ref() @@ -532,7 +604,7 @@ impl BlockBuilder { validator_weights, self.should_fetch_execution_state, ); - self.handle_acceptance(maybe_peer, acceptance) + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_sigs) } pub(super) fn register_global_state(&mut self, global_state: Digest) -> Result<(), Error> { @@ -562,12 +634,31 @@ impl BlockBuilder { Ok(()) } + pub(super) fn waiting_for_execution_results(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveGlobalState(..) if self.should_fetch_execution_state => true, + BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => false, + } + } + pub(super) fn register_fetched_execution_results( &mut self, maybe_peer: Option, block_execution_results_or_chunk: BlockExecutionResultsOrChunk, ) -> Result>, Error> { debug!(block_hash=%self.block_hash, "register_fetched_execution_results"); + let was_waiting_for_execution_results = self.waiting_for_execution_results(); match self.acquisition_state.register_execution_results_or_chunk( block_execution_results_or_chunk, self.should_fetch_execution_state, @@ -580,7 +671,11 @@ impl BlockBuilder { ?acceptance, "register_fetched_execution_results: Ok(RegisterExecResultsOutcome)" ); - self.handle_acceptance(maybe_peer, Ok(acceptance))?; + self.handle_acceptance( + maybe_peer, + Ok(acceptance), + was_waiting_for_execution_results, + )?; Ok(exec_results) } Err(BlockAcquisitionError::ExecutionResults(error)) => { @@ -596,6 +691,9 @@ impl BlockBuilder { | execution_results_acquisition::Error::AttemptToApplyDataWhenMissingChecksum { .. } | execution_results_acquisition::Error::InvalidOutcomeFromApplyingChunk { .. 
} => { + if was_waiting_for_execution_results { + self.latch_decrement(); + } debug!( "register_fetched_execution_results: BlockHashMismatch | \ InvalidAttemptToApplyChecksum | AttemptToApplyDataWhenMissingChecksum \ @@ -619,6 +717,9 @@ impl BlockBuilder { self.disqualify_peer(peer); } } + if was_waiting_for_execution_results { + self.latch_decrement(); + } } // malicious peer execution_results_acquisition::Error::InvalidChunkCount { .. } @@ -629,10 +730,16 @@ impl BlockBuilder { if let Some(peer) = maybe_peer { self.disqualify_peer(peer); } + if was_waiting_for_execution_results { + self.latch_decrement(); + } } // checksum unavailable, so unknown if this peer is malicious execution_results_acquisition::Error::ChunksWithDifferentChecksum { .. } => { debug!("register_fetched_execution_results: ChunksWithDifferentChecksum"); + if was_waiting_for_execution_results { + self.latch_decrement(); + } } } Err(Error::BlockAcquisition( @@ -660,15 +767,44 @@ impl BlockBuilder { Ok(()) } + pub(super) fn waiting_for_deploys(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveApprovalsHashes(_, _, deploys) => { + deploys.needs_deploy().is_some() + } + BlockAcquisitionState::HaveAllExecutionResults(_, _, deploys, checksum) + if self.should_fetch_execution_state => + { + if !checksum.is_checkable() { + deploys.needs_deploy().is_some() + } else { + false + } + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveFinalizedBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => false, + } + } + pub(super) fn register_deploy( &mut self, deploy_id: DeployId, maybe_peer: Option, ) -> Result<(), Error> { + let was_waiting_for_deploys = self.waiting_for_deploys(); let acceptance = self .acquisition_state .register_deploy(deploy_id, self.should_fetch_execution_state); - self.handle_acceptance(maybe_peer, acceptance) + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_deploys) } pub(super) fn register_peers(&mut self, peers: Vec) { @@ -690,19 +826,36 @@ impl BlockBuilder { &mut self, maybe_peer: Option, acceptance: Result, BlockAcquisitionError>, + should_unlatch: bool, ) -> Result<(), Error> { match acceptance { Ok(Some(Acceptance::NeededIt)) => { + // Got a useful response. Unlatch in all cases since we want to get the next item. self.touch(); if let Some(peer) = maybe_peer { self.promote_peer(peer); } } - Ok(Some(Acceptance::HadIt)) | Ok(None) => (), + Ok(Some(Acceptance::HadIt)) => { + // Already had this item, which means that this was a late response for a previous + // fetch. We don't unlatch in this case and wait for a valid response. + } + Ok(None) => { + if should_unlatch { + self.latch_decrement(); + } + } Err(error) => { if let Some(peer) = maybe_peer { self.disqualify_peer(peer); } + + // If we were waiting for a response and the item was not good, + // decrement latch. Fetch will be retried when unlatched. 
+ if should_unlatch { + self.latch_decrement(); + } + return Err(Error::BlockAcquisition(error)); } } diff --git a/node/src/components/block_synchronizer/block_builder/tests.rs b/node/src/components/block_synchronizer/block_builder/tests.rs index cf9c594082..762e99ef67 100644 --- a/node/src/components/block_synchronizer/block_builder/tests.rs +++ b/node/src/components/block_synchronizer/block_builder/tests.rs @@ -8,7 +8,7 @@ use crate::components::consensus::tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_ use super::*; #[test] -fn handle_acceptance() { +fn handle_acceptance_promotes_and_disqualifies_peers() { let mut rng = TestRng::new(); let block = Block::random(&mut rng); let mut builder = BlockBuilder::new( @@ -25,38 +25,38 @@ fn handle_acceptance() { // Builder acceptance for needed signature from ourselves. assert!(builder - .handle_acceptance(None, Ok(Some(Acceptance::NeededIt))) + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true) .is_ok()); assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for existent signature from ourselves. assert!(builder - .handle_acceptance(None, Ok(Some(Acceptance::HadIt))) + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true) .is_ok()); assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for no signature from ourselves. - assert!(builder.handle_acceptance(None, Ok(None)).is_ok()); + assert!(builder.handle_acceptance(None, Ok(None), true).is_ok()); assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for no signature from a peer. // Peer shouldn't be registered. assert!(builder - .handle_acceptance(Some(honest_peer), Ok(None)) + .handle_acceptance(Some(honest_peer), Ok(None), true) .is_ok()); assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for existent signature from a peer. // Peer shouldn't be registered. assert!(builder - .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::HadIt))) + .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::HadIt)), true) .is_ok()); assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for needed signature from a peer. // Peer should be registered as honest. assert!(builder - .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::NeededIt))) + .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::NeededIt)), true) .is_ok()); assert!(builder .peer_list() @@ -65,7 +65,11 @@ fn handle_acceptance() { assert!(builder.peer_list().dishonest_peers().is_empty()); // Builder acceptance for error on signature handling from ourselves. 
assert!(builder - .handle_acceptance(None, Err(BlockAcquisitionError::InvalidStateTransition)) + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + true + ) .is_err()); assert!(builder .peer_list() @@ -77,7 +81,8 @@ fn handle_acceptance() { assert!(builder .handle_acceptance( Some(dishonest_peer), - Err(BlockAcquisitionError::InvalidStateTransition) + Err(BlockAcquisitionError::InvalidStateTransition), + true ) .is_err()); assert!(builder @@ -90,6 +95,71 @@ fn handle_acceptance() { .contains(&dishonest_peer)); } +#[test] +fn handle_acceptance_unlatches_builder() { + let mut rng = TestRng::new(); + let block = Block::random(&mut rng); + let mut builder = BlockBuilder::new( + block.header().block_hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + + // Check that if a valid element was received, the latch is reset + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true) + .is_ok()); + assert_eq!(builder.latch.count(), 0); + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), false) + .is_ok()); + assert_eq!(builder.latch.count(), 0); + + // Check that if a element that was previously received, + // the latch is not decremented since this is a late response + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true) + .is_ok()); + assert_eq!(builder.latch.count(), 2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), false) + .is_ok()); + assert_eq!(builder.latch.count(), 2); + + // Check that the latch is decremented if a response lead to an error, + // but only if the builder was waiting for that element in its current state + assert!(builder + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + true + ) + .is_err()); + assert_eq!(builder.latch.count(), 1); + assert!(builder + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + false + ) + .is_err()); + assert_eq!(builder.latch.count(), 1); + + // Check that the latch is decremented if a valid response was received that did not produce any + // side effect, but only if the builder was waiting for that element in its current state + builder.latch_by(1); + assert!(builder.handle_acceptance(None, Ok(None), false).is_ok()); + assert_eq!(builder.latch.count(), 2); + assert!(builder.handle_acceptance(None, Ok(None), true).is_ok()); + assert_eq!(builder.latch.count(), 1); +} + #[test] fn register_era_validator_weights() { let mut rng = TestRng::new(); diff --git a/node/src/components/block_synchronizer/tests.rs b/node/src/components/block_synchronizer/tests.rs index 256145cb0b..462b83447c 100644 --- a/node/src/components/block_synchronizer/tests.rs +++ b/node/src/components/block_synchronizer/tests.rs @@ -28,7 +28,10 @@ use crate::{ effect::Effect, reactor::{EventQueueHandle, QueueKind, Scheduler}, tls::KeyFingerprint, - types::{chainspec::LegacyRequiredFinality, DeployId, TestBlockBuilder}, + types::{ + chainspec::LegacyRequiredFinality, BlockExecutionResultsOrChunkId, DeployId, + TestBlockBuilder, ValueOrChunk, + }, utils, }; @@ -299,6 +302,11 @@ fn weak_finality_threshold(n: usize) -> usize { n / 3 + 1 } +/// Returns the number of validators that need a signature for a strict finality of 2/3. 
+fn strict_finality_threshold(n: usize) -> usize { + n * 2 / 3 + 1 +} + fn latch_inner_check(builder: Option<&BlockBuilder>, expected: bool, msg: &str) { assert_eq!( builder.expect("builder should exist").latched(), @@ -308,6 +316,15 @@ fn latch_inner_check(builder: Option<&BlockBuilder>, expected: bool, msg: &str) ); } +fn latch_count_check(builder: Option<&BlockBuilder>, expected: u8, msg: &str) { + assert_eq!( + builder.expect("builder should exist").latch_count(), + expected, + "{}", + msg + ); +} + fn need_next_inner_check( builder: Option<&mut BlockBuilder>, rng: &mut TestRng, @@ -2496,14 +2513,13 @@ async fn historical_sync_no_legacy_block() { ); } + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), rng, Event::ExecutionResultsFetched { block_hash: *block.hash(), - result: Ok(FetchedData::from_storage(Box::new( - BlockExecutionResultsOrChunk::new_mock_value(*block.hash()), - ))), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), }, ); @@ -2718,14 +2734,13 @@ async fn historical_sync_legacy_block_strict_finality() { ); } + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), rng, Event::ExecutionResultsFetched { block_hash: *block.hash(), - result: Ok(FetchedData::from_storage(Box::new( - BlockExecutionResultsOrChunk::new_mock_value(*block.hash()), - ))), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), }, ); @@ -2916,14 +2931,13 @@ async fn historical_sync_legacy_block_weak_finality() { ); } + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), rng, Event::ExecutionResultsFetched { block_hash: *block.hash(), - result: Ok(FetchedData::from_storage(Box::new( - BlockExecutionResultsOrChunk::new_mock_value(*block.hash()), - ))), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), }, ); @@ -3125,14 +3139,13 @@ async fn historical_sync_legacy_block_any_finality() { ); } + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); let effects = block_synchronizer.handle_event( mock_reactor.effect_builder(), rng, Event::ExecutionResultsFetched { block_hash: *block.hash(), - result: Ok(FetchedData::from_storage(Box::new( - BlockExecutionResultsOrChunk::new_mock_value(*block.hash()), - ))), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), }, ); @@ -3210,3 +3223,943 @@ async fn historical_sync_legacy_block_any_finality() { assert_matches!(event, MockReactorEvent::MarkBlockCompletedRequest(_)); } } + +#[tokio::test] +async fn fwd_sync_latch_should_not_decrement_for_old_responses() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let deploys = [Deploy::random(&mut rng)]; + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .deploys(deploys.iter()) + .build(&mut rng), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + 
assert!(block_synchronizer.forward.is_some()); + + // Start syncing. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::Request(BlockSynchronizerRequest::NeedNext), + ); + assert_eq!(effects.len(), 1); + + // First, the synchronizer should get peers. + let events = mock_reactor.process_effects(effects).await; + assert_matches!( + events[0], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. + }) if block_hash == *block.hash() + ); + + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + "should be latched waiting for peers", + ); + } + + // Register peers. This would make the synchronizer ask for the block header. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::AccumulatedPeers(*block.hash(), Some(peers.clone())), + ); + let events = mock_reactor.process_effects(effects).await; + + let mut peers_asked = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() => { + peers_asked.push(peer); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block header was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Simulate successful fetch of the block header. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone().take_header()), + peer: peers_asked[0], + })), + ); + let events = mock_reactor.process_effects(effects).await; + + let expected_latch_count = events.len() as u8; // number of finality sig fetches. + + // Check what signatures were requested + let mut sigs_requested = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert_eq!(id.block_hash, *block.hash()); + assert_eq!(id.era_id, block.header().era_id()); + sigs_requested.push((peer, id.public_key)); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no finality sigs were received.", + expected_latch_count + ) + .as_str(), + ); + + // Receive a late response with the block header. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone().take_header()), + peer: peers_asked[1], + })), + ); + + assert_eq!(effects.len(), 0); + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no finality sigs were received.", + expected_latch_count + ) + .as_str(), + ); + } + + // Register finality sigs. This would make the synchronizer switch to have weak finality and + // continue asking for the block body. 
+ { + let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())) + { + // Register a finality signature + let signature = FinalitySignature::create( + *block.hash(), + block.header().era_id(), + secret_key.as_ref(), + PublicKey::from(secret_key.as_ref()), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + let events = mock_reactor + .process_effects( + generated_effects + .into_iter() + .rev() + .take(MAX_SIMULTANEOUS_PEERS as usize), + ) + .await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, *block.hash()); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive some more finality signatures to check if the latch decrements. + let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())) + .take(2) + { + // Register a finality signature + let signature = FinalitySignature::create( + *block.hash(), + block.header().era_id(), + secret_key.as_ref(), + PublicKey::from(secret_key.as_ref()), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + assert_eq!(generated_effects.len(), 0); + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Register a block response. This would make the synchronizer switch to HaveBlock and continue + // asking for the approvals hashes. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone()), + peer: peers[0], + })), + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no approval hashes were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive another response with the block. This is the second response out of the 5 we sent + // out earlier. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone()), + peer: peers[1], + })), + ); + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no approval hashes were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Register approvals hashes. This would make the synchronizer switch to HaveApprovalsHashes and + // continue asking for the deploys. + { + let approvals_hashes = ApprovalsHashes::new( + block.hash(), + deploys + .iter() + .map(|deploy| deploy.approvals_hash().unwrap()) + .collect(), + dummy_merkle_proof(), + ); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer { + item: Box::new(approvals_hashes.clone()), + peer: peers[0], + })), + ); + assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::DeployFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, DeployId::new( + *deploys[0].hash(), + approvals_hashes.approvals_hashes()[0], + )); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive a late response with the approvals hashes. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer { + item: Box::new(approvals_hashes.clone()), + peer: peers[1], + })), + ); + + assert_eq!(effects.len(), 0); + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Receive a deploy. This would make the synchonizer switch to HaveAllDeploys and continue + // asking for more finality signatures in order to reach strict finality. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(deploys[0].clone())))), + }, + ); + let events = mock_reactor.process_effects(effects).await; + let expected_latch_count = events.len() as u8; + + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no new signatures were received.", + expected_latch_count + ) + .as_str(), + ); + + // Since it's the single deploy in the block, the next step is to get the rest of the + // finality signatures to get strict finality. + for event in events { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + .. + }) => { + assert_eq!(id.block_hash, *block.hash()); + assert_eq!(id.era_id, block.header().era_id()); + } + ); + } + + // Receive a late deploy response. 
+        let effects = block_synchronizer.handle_event(
+            mock_reactor.effect_builder(),
+            &mut rng,
+            Event::DeployFetched {
+                block_hash: *block.hash(),
+                result: Either::Right(Ok(FetchedData::from_storage(Box::new(deploys[0].clone())))),
+            },
+        );
+
+        assert_eq!(effects.len(), 0);
+        latch_count_check(
+            block_synchronizer.forward.as_ref(),
+            expected_latch_count,
+            "Latch should not have changed since we did not receive a new signature yet.",
+        );
+    }
+
+    // Receive the rest of the missing signatures to get strict finality. This would switch the
+    // state to HaveStrictFinality and continue to request to make the block executable.
+    {
+        let mut generated_effects = Effects::new();
+        for secret_key in validators_secret_keys.iter().rev().take(
+            strict_finality_threshold(validators_secret_keys.len())
+                - weak_finality_threshold(validators_secret_keys.len()),
+        ) {
+            // Register a finality signature
+            let signature = FinalitySignature::create(
+                *block.hash(),
+                block.header().era_id(),
+                secret_key.as_ref(),
+                PublicKey::from(secret_key.as_ref()),
+            );
+            assert!(signature.is_verified().is_ok());
+            let effects = block_synchronizer.handle_event(
+                mock_reactor.effect_builder(),
+                &mut rng,
+                Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer {
+                    item: Box::new(signature),
+                    peer: peers[2],
+                })),
+            );
+            generated_effects.extend(effects);
+        }
+
+        // Once strict finality is achieved, the synchronizer will try to make the block executable.
+        let events = mock_reactor
+            .process_effects(generated_effects.into_iter().rev().take(1))
+            .await;
+
+        for event in events {
+            assert_matches!(
+                event,
+                MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest {
+                    block_hash,
+                    ..
+                }) if block_hash == *block.hash()
+            );
+        }
+
+        latch_count_check(
+            block_synchronizer.forward.as_ref(),
+            1,
+            "Latch count should still be 1 since no FinalizedBlock was received.",
+        );
+    }
+}
+
+#[tokio::test]
+async fn historical_sync_latch_should_not_decrement_for_old_deploy_fetch_responses() {
+    let rng = &mut TestRng::new();
+    let mock_reactor = MockReactor::new();
+    let first_deploy = Deploy::random(rng);
+    let second_deploy = Deploy::random(rng);
+    let third_deploy = Deploy::random(rng);
+    let test_env = TestEnv::random(rng).with_block(
+        TestBlockBuilder::new()
+            .era(1)
+            .deploys(
+                [
+                    first_deploy.clone(),
+                    second_deploy.clone(),
+                    third_deploy.clone(),
+                ]
+                .iter(),
+            )
+            .build(rng),
+    );
+    let peers = test_env.peers();
+    let block = test_env.block();
+    let validator_matrix = test_env.gen_validator_matrix();
+    let validators_secret_keys = test_env.validator_keys();
+    let mut block_synchronizer =
+        BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default())
+            .with_legacy_finality(LegacyRequiredFinality::Strict);
+
+    // Register block for historical sync
+    assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE));
+    block_synchronizer.register_peers(*block.hash(), peers.clone());
+
+    let historical_builder = block_synchronizer
+        .historical
+        .as_mut()
+        .expect("Historical builder should have been initialized");
+    historical_builder
+        .register_block_header(block.clone().take_header(), None)
+        .expect("header registration works");
+    historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix);
+    register_multiple_signatures(
+        historical_builder,
+        block,
+        validators_secret_keys
+            .iter()
+            .take(weak_finality_threshold(validators_secret_keys.len())),
+    );
+    assert!(historical_builder.register_block(block,
None).is_ok()); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: Ok(Some(Digest::SENTINEL_NONE)), + }, + ); + + let execution_results = + BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results( + rng, + *block.hash(), + 3, + ); + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) + if checksum.is_checkable() == true + ); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::from_storage(Box::new( + ApprovalsHashes::new( + block.hash(), + vec![ + first_deploy.approvals_hash().unwrap(), + second_deploy.approvals_hash().unwrap(), + third_deploy.approvals_hash().unwrap(), + ], + dummy_merkle_proof(), + ), + )))), + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveApprovalsHashes(_, _, _) + ); + + let events = mock_reactor.process_effects(effects).await; + for event in events { + assert_matches!( + event, + MockReactorEvent::DeployFetcherRequest(FetcherRequest { .. }) + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the first deploy. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new( + first_deploy.clone(), + )))), + }, + ); + + // The first deploy was registered. The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch + // requests for another deploy. + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::DeployFetcherRequest(FetcherRequest { .. }) + ); + } + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since the node should ask for the second deploy.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the second deploy. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new( + second_deploy.clone(), + )))), + }, + ); + + // The second deploy was registered. The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch + // requests for another deploy. 
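+    // Hypothetical clarifying note: the latch should again be reset to MAX_SIMULTANEOUS_PEERS
+    // while the fetch requests for the third deploy are in flight; only a response for that
+    // deploy should unlatch the synchronizer from this point on.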
+ for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::DeployFetcherRequest(FetcherRequest { .. }) + ); + } + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since the node should ask for the third deploy.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // The current state is: + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the first deploy and received 1 response. + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the second deploy and received 1 response. + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the third deploy and haven't received anything + // yet. + // + // So we can receive at this point MAX_SIMULTANEOUS_PEERS - 2 "late" responses for the first and + // second deploys and MAX_SIMULTANEOUS_PEERS responses for the third deploy. + // + // Simulate that we receive the "late" responses first. The synchronizer shouldn't unlatch and + // try to send out more requests for the third deploy. It should hold off until the right + // response comes through. + + // Receive the late responses for the first deploy + for _ in 1..MAX_SIMULTANEOUS_PEERS { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new( + first_deploy.clone(), + )))), + }, + ); + + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + "Shouldn't decrement the latch since this was a late response", + ); + } + + // Receive the late responses for the second deploy + for _ in 1..MAX_SIMULTANEOUS_PEERS { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new( + second_deploy.clone(), + )))), + }, + ); + + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + "Shouldn't decrement the latch since this was a late response", + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new( + third_deploy.clone(), + )))), + }, + ); + + // ----- HaveAllDeploys ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllDeploys(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + for event in events { + assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_)); + } +} + +#[tokio::test] +async fn historical_sync_latch_should_not_decrement_for_old_execution_results() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let first_deploy = Deploy::random(rng); + let second_deploy = Deploy::random(rng); + let third_deploy = Deploy::random(rng); + let test_env = TestEnv::random(rng).with_block( + TestBlockBuilder::new() + .era(1) + .deploys( + [ + first_deploy.clone(), + second_deploy.clone(), + third_deploy.clone(), + ] + .iter(), + ) + .build(rng), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, 
validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone().take_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + ); + assert!(historical_builder.register_block(block, None).is_ok()); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + super::global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + latch_count_check( + block_synchronizer.historical.as_ref(), + 1, + "Latch count should be 1 since we're waiting for execution results checksum.", + ); + + // Create chunked execution results. + let execution_results = + BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results( + rng, + *block.hash(), + 100000, // Lots of results to achieve chunking. + ); + let checksum = assert_matches!( + execution_results.value(), + ValueOrChunk::ChunkWithProof(chunk) => chunk.proof().root_hash() + ); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: Ok(Some(checksum)), + }, + ); + + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no chunks of execution results were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive the first chunk of execution results. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new( + execution_results.clone(), + ))), + }, + ); + + // It's expected that the synchronizer will ask for the next chunks of execution results. + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { id, .. }) if id.chunk_index() != 0 + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no responses with chunks != 0 were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive the first chunk of execution results again (late response). 
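+    // Hypothetical clarifying note: the first chunk is already registered, so this duplicate
+    // must not count as progress or decrement the latch.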
+    let _effects = block_synchronizer.handle_event(
+        mock_reactor.effect_builder(),
+        rng,
+        Event::ExecutionResultsFetched {
+            block_hash: *block.hash(),
+            result: Ok(FetchedData::from_storage(Box::new(execution_results))),
+        },
+    );
+
+    latch_count_check(
+        block_synchronizer.historical.as_ref(),
+        MAX_SIMULTANEOUS_PEERS,
+        format!(
+            "Latch count should be {} since we already had the first chunk and no responses with chunks != 0 were received.",
+            MAX_SIMULTANEOUS_PEERS
+        )
+        .as_str(),
+    );
+
+    // Receive a fetch error.
+    let _effects = block_synchronizer.handle_event(
+        mock_reactor.effect_builder(),
+        rng,
+        Event::ExecutionResultsFetched {
+            block_hash: *block.hash(),
+            result: Err(FetcherError::Absent {
+                id: Box::new(BlockExecutionResultsOrChunkId::new(*block.hash())),
+                peer: peers[0],
+            }),
+        },
+    );
+
+    latch_count_check(
+        block_synchronizer.historical.as_ref(),
+        MAX_SIMULTANEOUS_PEERS - 1,
+        format!(
+            "Latch count should be {} since we received an `Absent` response.",
+            MAX_SIMULTANEOUS_PEERS - 1
+        )
+        .as_str(),
+    );
+}
diff --git a/node/src/types/block.rs b/node/src/types/block.rs
index 9f6d7a2961..c570fe36e0 100644
--- a/node/src/types/block.rs
+++ b/node/src/types/block.rs
@@ -1876,17 +1876,30 @@ impl BlockExecutionResultsOrChunk {
     }
 
     #[cfg(test)]
-    pub(crate) fn new_mock_value(block_hash: BlockHash) -> Self {
+    pub(crate) fn new_mock_value(rng: &mut TestRng, block_hash: BlockHash) -> Self {
+        Self::new_mock_value_with_multiple_random_results(rng, block_hash, 1)
+    }
+
+    #[cfg(test)]
+    pub(crate) fn new_mock_value_with_multiple_random_results(
+        rng: &mut TestRng,
+        block_hash: BlockHash,
+        num_results: usize,
+    ) -> Self {
+        let execution_results: Vec<casper_types::ExecutionResult> =
+            (0..num_results).into_iter().map(|_| rng.gen()).collect();
+
         Self {
             block_hash,
-            value: ValueOrChunk::Value(vec![casper_types::ExecutionResult::Success {
-                effect: Default::default(),
-                transfers: vec![],
-                cost: U512::from(123),
-            }]),
+            value: ValueOrChunk::new(execution_results, 0).unwrap(),
             is_valid: OnceCell::with_value(Ok(true)),
         }
     }
+
+    #[cfg(test)]
+    pub(crate) fn value(&self) -> &ValueOrChunk<Vec<casper_types::ExecutionResult>> {
+        &self.value
+    }
 }
 
 impl PartialEq for BlockExecutionResultsOrChunk {

From f54f86695a641d860da2368eba69355d863b4ded Mon Sep 17 00:00:00 2001
From: Alexandru Sardan
Date: Tue, 5 Sep 2023 12:51:15 +0000
Subject: [PATCH 09/41] config-example: remove `stall_limit` parameter

Remove `stall_limit` config parameter for the `block_synchronizer`
since it is not used anymore.

Signed-off-by: Alexandru Sardan
---
 resources/local/config.toml              | 4 ----
 resources/production/config-example.toml | 4 ----
 2 files changed, 8 deletions(-)

diff --git a/resources/local/config.toml b/resources/local/config.toml
index 8fe7e9ce11..fb889b7de5 100644
--- a/resources/local/config.toml
+++ b/resources/local/config.toml
@@ -500,10 +500,6 @@ disconnect_dishonest_peers_interval = '10sec'
 # Time interval for resetting the latch in block builders.
 latch_reset_interval = '5sec'
 
-# Time interval after which synchronization is considered stalled if no successful sync
-# activity happened.
-stall_limit = '120sec'
-
 # ==================================
 # Configuration options for fetchers

diff --git a/resources/production/config-example.toml b/resources/production/config-example.toml
index 0f96addda9..701d67fc75 100644
--- a/resources/production/config-example.toml
+++ b/resources/production/config-example.toml
@@ -500,10 +500,6 @@ disconnect_dishonest_peers_interval = '10sec'
 # Time interval for resetting the latch in block builders.
latch_reset_interval = '5sec' -# Time interval after which synchronization is considered stalled if no successful sync -# activity happened. -stall_limit = '120sec' - # ================================== # Configuration options for fetchers From 4af098e5a9096eeaa29f2dc13ed3bf5d8723217f Mon Sep 17 00:00:00 2001 From: Alexandru Sardan Date: Tue, 5 Sep 2023 13:06:20 +0000 Subject: [PATCH 10/41] CHANGELOG: add config/chainspec changes related to latch fixes Signed-off-by: Alexandru Sardan --- node/CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index 048d8f3505..66801cb735 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -11,6 +11,18 @@ All notable changes to this project will be documented in this file. The format +## Unreleased + +### Added + +### Changed +* Changed the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255. + +### Removed +* Removed the `block_synchronizer.stall_limit` node config parameter since it is no longer needed. + + + ## 1.5.2 ### Added From efe0fe2e06bf5bee7e90f1371fb48056e68a6f57 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Tue, 5 Sep 2023 15:33:15 +0100 Subject: [PATCH 11/41] fix issue in storage --- Makefile | 2 +- node/CHANGELOG.md | 8 ++ node/src/components/storage.rs | 82 +++++++------ node/src/components/storage/tests.rs | 171 +++++++++++++++++++++++++-- node/src/types/deploy/metadata.rs | 19 ++- types/src/execution_result.rs | 21 ++++ 6 files changed, 253 insertions(+), 50 deletions(-) diff --git a/Makefile b/Makefile index eead9dbeed..8431368701 100644 --- a/Makefile +++ b/Makefile @@ -134,7 +134,7 @@ lint-smart-contracts: .PHONY: audit-rs audit-rs: - $(CARGO) audit + $(CARGO) audit --ignore RUSTSEC-2022-0093 .PHONY: audit-as audit-as: diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index 048d8f3505..7a07181321 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -11,6 +11,14 @@ All notable changes to this project will be documented in this file. The format +## 1.5.2-alt + +### Fixed +* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks with no deploys could be reported as having `null` transfers rather than `[]`. +* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks containing successful transfers could erroneously be reported as having none. + + + ## 1.5.2 ### Added diff --git a/node/src/components/storage.rs b/node/src/components/storage.rs index 688ca62819..d4b1ea8d11 100644 --- a/node/src/components/storage.rs +++ b/node/src/components/storage.rs @@ -42,7 +42,7 @@ mod tests; use std::collections::BTreeSet; use std::{ borrow::Cow, - collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt::{self, Display, Formatter}, fs::{self, OpenOptions}, @@ -71,7 +71,7 @@ use tracing::{debug, error, info, trace, warn}; use casper_hashing::Digest; use casper_types::{ bytesrepr::{FromBytes, ToBytes}, - EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transfer, Transform, + EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transfer, }; use crate::{ @@ -853,10 +853,8 @@ impl Storage { block_hash, responder, } => { - let mut txn = self.env.begin_ro_txn()?; - responder - .respond(self.get_transfers(&mut txn, &block_hash)?) 
-                    .ignore()
+                let maybe_transfers = self.get_transfers(&block_hash)?;
+                responder.respond(maybe_transfers).ignore()
             }
             StorageRequest::PutDeploy { deploy, responder } => {
                 responder.respond(self.put_deploy(&deploy)?).ignore()
             }
@@ -1466,37 +1464,25 @@ impl Storage {
     ) -> Result<Vec<Transfer>, FatalStorageError> {
         let mut transfers: Vec<Transfer> = vec![];
         for (deploy_hash, execution_result) in execution_results {
+            transfers.extend(execution_result.successful_transfers());
+
             let mut metadata = self
                 .get_deploy_metadata(txn, &deploy_hash)?
                 .unwrap_or_default();
 
             // If we have a previous execution result, we can continue if it is the same.
-            if let Some(prev) = metadata.execution_results.get(block_hash) {
-                if prev == &execution_result {
-                    continue;
-                } else {
-                    debug!(%deploy_hash, %block_hash, "different execution result");
-                }
-            }
-
-            if let ExecutionResult::Success { effect, .. } = execution_result.clone() {
-                for transform_entry in effect.transforms {
-                    if let Transform::WriteTransfer(transfer) = transform_entry.transform {
-                        transfers.push(transfer);
+            match metadata.execution_results.entry(*block_hash) {
+                hash_map::Entry::Occupied(entry) => {
+                    if *entry.get() == execution_result {
+                        continue;
                     }
+                    *entry.into_mut() = execution_result;
+                }
+                hash_map::Entry::Vacant(vacant) => {
+                    vacant.insert(execution_result);
                 }
             }
 
-            // TODO: this is currently done like this because rpc get_deploy returns the
-            // data, but the organization of deploy, block_hash, and
-            // execution_result is incorrectly represented. it should be
-            // inverted; for a given block_hash 0n deploys and each deploy has exactly 1
-            // result (aka deploy_metadata in this context).
-
-            // Update metadata and write back to db.
-            metadata
-                .execution_results
-                .insert(*block_hash, execution_result);
             let was_written =
                 txn.put_value(self.deploy_metadata_db, &deploy_hash, &metadata, true)?;
             if !was_written {
@@ -2175,16 +2161,40 @@ impl Storage {
         Ok(txn.get_value(self.deploy_metadata_db, deploy_hash)?)
     }
 
-    /// Retrieves transfers associated with block.
+    /// Retrieves successful transfers associated with block.
     ///
-    /// If no transfers are stored for the block, an empty transfers instance will be
-    /// created, but not stored.
-    fn get_transfers<Tx: Transaction>(
+    /// If there is no record of successful transfers for this block, then the list will be built
+    /// from the execution results and stored to `transfer_db`. The record could have been missing
+    /// or incorrectly set to an empty collection due to previous synchronization and storage
+    /// issues. See https://github.com/casper-network/casper-node/issues/4255 and
+    /// https://github.com/casper-network/casper-node/issues/4268 for further info.
+    fn get_transfers(
         &self,
-        txn: &mut Tx,
         block_hash: &BlockHash,
     ) -> Result<Option<Vec<Transfer>>, FatalStorageError> {
-        Ok(txn.get_value(self.transfer_db, block_hash)?)
+        let mut txn = self.env.begin_rw_txn()?;
+        if let Some(transfers) = txn.get_value::<_, Vec<Transfer>>(self.transfer_db, block_hash)? {
+            if !transfers.is_empty() {
+                return Ok(Some(transfers));
+            }
+        }
+
+        let block = match self.get_single_block(&mut txn, block_hash)? {
+            Some(block) => block,
+            None => return Ok(None),
+        };
+
+        let mut transfers: Vec<Transfer> = vec![];
+        for deploy_hash in block.deploy_and_transfer_hashes() {
+            let metadata = self
+                .get_deploy_metadata(&mut txn, deploy_hash)?
+                .unwrap_or_default();
+
+            transfers.extend(metadata.successful_transfers(block_hash));
+        }
+        txn.put_value(self.transfer_db, block_hash, &transfers, true)?;
+        txn.commit()?;
+        Ok(Some(transfers))
     }
 
     /// Retrieves block signatures for a block with a given block hash.
@@ -2614,10 +2624,10 @@ fn insert_to_block_header_indices( if block_header.is_switch_block() { match switch_block_era_id_index.entry(block_header.era_id()) { - Entry::Vacant(entry) => { + btree_map::Entry::Vacant(entry) => { let _ = entry.insert(block_hash); } - Entry::Occupied(entry) => { + btree_map::Entry::Occupied(entry) => { if *entry.get() != block_hash { return Err(FatalStorageError::DuplicateEraIdIndex { era_id: block_header.era_id(), diff --git a/node/src/components/storage/tests.rs b/node/src/components/storage/tests.rs index ec574b6ad7..763b4c30b5 100644 --- a/node/src/components/storage/tests.rs +++ b/node/src/components/storage/tests.rs @@ -8,18 +8,21 @@ use std::{ sync::Arc, }; +use lmdb::Transaction; use rand::{prelude::SliceRandom, Rng}; use serde::{Deserialize, Serialize}; use smallvec::smallvec; use casper_types::{ generate_ed25519_keypair, system::auction::UnbondingPurse, testing::TestRng, AccessRights, - EraId, ExecutionResult, ProtocolVersion, PublicKey, SecretKey, TimeDiff, URef, U512, + EraId, ExecutionEffect, ExecutionResult, Key, ProtocolVersion, PublicKey, SecretKey, TimeDiff, + Transfer, Transform, TransformEntry, URef, U512, }; use super::{ + lmdb_ext::{deserialize_internal, serialize_internal, TransactionExt, WriteTransactionExt}, move_storage_files_to_network_subdir, should_move_storage_files_to_network_subdir, Config, - Storage, + Storage, FORCE_RESYNC_FILE_NAME, }; use crate::{ components::fetcher::{FetchItem, FetchResponse}, @@ -27,16 +30,13 @@ use crate::{ requests::{MarkBlockCompletedRequest, StorageRequest}, Multiple, }, - storage::{ - lmdb_ext::{deserialize_internal, serialize_internal}, - FORCE_RESYNC_FILE_NAME, - }, testing::{ComponentHarness, UnitTestEvent}, types::{ sync_leap_validation_metadata::SyncLeapValidationMetaData, AvailableBlockRange, Block, BlockHash, BlockHashAndHeight, BlockHeader, BlockHeaderWithMetadata, BlockSignatures, Chainspec, ChainspecRawBytes, Deploy, DeployHash, DeployMetadata, DeployMetadataExt, DeployWithFinalizedApprovals, FinalitySignature, LegacyDeploy, SyncLeapIdentifier, + TestBlockBuilder, }, utils::{Loadable, WithDir}, }; @@ -1056,21 +1056,168 @@ fn store_execution_results_twice_for_same_block_deploy_pair() { put_execution_results(&mut harness, &mut storage, block_hash, exec_result_2); } +fn prepare_exec_result_with_transfer( + rng: &mut TestRng, + deploy_hash: &DeployHash, +) -> (ExecutionResult, Transfer) { + let transfer = Transfer::new( + (*deploy_hash).into(), + rng.gen(), + Some(rng.gen()), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + Some(rng.gen()), + ); + let transform = TransformEntry { + key: Key::DeployInfo((*deploy_hash).into()).to_formatted_string(), + transform: Transform::WriteTransfer(transfer), + }; + let effect = ExecutionEffect::new(vec![transform]); + let exec_result = ExecutionResult::Success { + effect, + transfers: vec![], + cost: rng.gen(), + }; + (exec_result, transfer) +} + #[test] fn store_identical_execution_results() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let block_hash = BlockHash::random(&mut harness.rng); - let deploy_hash = DeployHash::random(&mut harness.rng); + let deploy = Deploy::random_valid_native_transfer(&mut harness.rng); + let deploy_hash = *deploy.hash(); + let block = Block::random_with_deploys(&mut harness.rng, Some(&deploy)); + storage.write_block(&block).unwrap(); + let block_hash = *block.hash(); - let mut exec_result = HashMap::new(); - exec_result.insert(deploy_hash, harness.rng.gen()); + let 
(exec_result, transfer) = prepare_exec_result_with_transfer(&mut harness.rng, &deploy_hash);
+    let mut exec_results = HashMap::new();
+    exec_results.insert(deploy_hash, exec_result.clone());
 
-    put_execution_results(&mut harness, &mut storage, block_hash, exec_result.clone());
+    put_execution_results(&mut harness, &mut storage, block_hash, exec_results.clone());
+    {
+        let mut txn = storage.env.begin_ro_txn().unwrap();
+        let retrieved_results = storage
+            .get_execution_results(&mut txn, &block_hash)
+            .expect("should execute get")
+            .expect("should return Some");
+        assert_eq!(retrieved_results.len(), 1);
+        assert_eq!(retrieved_results[0].0, deploy_hash);
+        assert_eq!(retrieved_results[0].1, exec_result);
+    }
+    let retrieved_transfers = storage
+        .get_transfers(&block_hash)
+        .expect("should execute get")
+        .expect("should return Some");
+    assert_eq!(retrieved_transfers.len(), 1);
+    assert_eq!(retrieved_transfers[0], transfer);
 
     // We should be fine storing the exact same result twice.
-    put_execution_results(&mut harness, &mut storage, block_hash, exec_result);
+    put_execution_results(&mut harness, &mut storage, block_hash, exec_results);
+    {
+        let mut txn = storage.env.begin_ro_txn().unwrap();
+        let retrieved_results = storage
+            .get_execution_results(&mut txn, &block_hash)
+            .expect("should execute get")
+            .expect("should return Some");
+        assert_eq!(retrieved_results.len(), 1);
+        assert_eq!(retrieved_results[0].0, deploy_hash);
+        assert_eq!(retrieved_results[0].1, exec_result);
+    }
+    let retrieved_transfers = storage
+        .get_transfers(&block_hash)
+        .expect("should execute get")
+        .expect("should return Some");
+    assert_eq!(retrieved_transfers.len(), 1);
+    assert_eq!(retrieved_transfers[0], transfer);
+}
+
+/// This is a regression test for the issue where `Transfer`s under a block with no deploys could be
+/// returned as `None` rather than the expected `Some(vec![])`. The fix should ensure that if no
+/// Transfers are found, storage will respond with an empty collection and store the correct value
+/// for future requests.
+///
+/// See https://github.com/casper-network/casper-node/issues/4255 for further info.
+#[test]
+fn should_provide_transfers_if_not_stored() {
+    let mut harness = ComponentHarness::default();
+    let mut storage = storage_fixture(&harness);
+
+    let block = TestBlockBuilder::new()
+        .deploys(None)
+        .build(&mut harness.rng);
+    assert_eq!(block.body().deploy_and_transfer_hashes().count(), 0);
+    storage.write_block(&block).unwrap();
+    let block_hash = *block.hash();
+
+    // Check an empty collection is returned.
+    let retrieved_transfers = storage
+        .get_transfers(&block_hash)
+        .expect("should execute get")
+        .expect("should return Some");
+    assert!(retrieved_transfers.is_empty());
+
+    // Check the empty collection has been stored.
+    let mut txn = storage.env.begin_ro_txn().unwrap();
+    let maybe_transfers = txn
+        .get_value::<_, Vec<Transfer>>(storage.transfer_db, &block_hash)
+        .unwrap();
+    assert_eq!(Some(vec![]), maybe_transfers);
+}
+
+/// This is a regression test for the issue where a valid collection of `Transfer`s under a given
+/// block could be erroneously replaced with an empty collection. The fix should ensure that if an
+/// empty collection of Transfers is found, storage will replace it with the correct collection and
+/// store the correct value for future requests.
+///
+/// See https://github.com/casper-network/casper-node/issues/4268 for further info.
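+/// (Clarifying summary, added editorially:) the test first stores a valid transfer collection,
+/// then overwrites it directly in `transfer_db` with an empty collection, and finally checks that
+/// `get_transfers` returns and re-stores the original transfers.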
+#[test]
+fn should_provide_transfers_after_emptied() {
+    let mut harness = ComponentHarness::default();
+    let mut storage = storage_fixture(&harness);
+
+    let deploy = Deploy::random_valid_native_transfer(&mut harness.rng);
+    let deploy_hash = *deploy.hash();
+    let block = Block::random_with_deploys(&mut harness.rng, Some(&deploy));
+    storage.write_block(&block).unwrap();
+    let block_hash = *block.hash();
+
+    let (exec_result, transfer) = prepare_exec_result_with_transfer(&mut harness.rng, &deploy_hash);
+    let mut exec_results = HashMap::new();
+    exec_results.insert(deploy_hash, exec_result);
+
+    put_execution_results(&mut harness, &mut storage, block_hash, exec_results.clone());
+    // Replace the valid collection with an empty one.
+    {
+        let mut txn = storage.env.begin_rw_txn().unwrap();
+        txn.put_value(
+            storage.transfer_db,
+            &block_hash,
+            &Vec::<Transfer>::new(),
+            true,
+        )
+        .unwrap();
+        txn.commit().unwrap();
+    }
+
+    // Check the correct value is returned.
+    let retrieved_transfers = storage
+        .get_transfers(&block_hash)
+        .expect("should execute get")
+        .expect("should return Some");
+    assert_eq!(retrieved_transfers.len(), 1);
+    assert_eq!(retrieved_transfers[0], transfer);
+
+    // Check the correct value has been stored.
+    let mut txn = storage.env.begin_ro_txn().unwrap();
+    let maybe_transfers = txn
+        .get_value::<_, Vec<Transfer>>(storage.transfer_db, &block_hash)
+        .unwrap();
+    assert_eq!(Some(vec![transfer]), maybe_transfers);
+}
 
 /// Example state used in storage.
diff --git a/node/src/types/deploy/metadata.rs b/node/src/types/deploy/metadata.rs
index 0d15a2886a..c3333669f9 100644
--- a/node/src/types/deploy/metadata.rs
+++ b/node/src/types/deploy/metadata.rs
@@ -1,8 +1,9 @@
 use std::collections::HashMap;
 
 use serde::{Deserialize, Serialize};
+use tracing::error;
 
-use casper_types::ExecutionResult;
+use casper_types::{ExecutionResult, Transfer};
 
 use crate::types::{BlockHash, BlockHashAndHeight};
 
@@ -17,6 +18,22 @@ pub(crate) struct Metadata {
     pub(crate) execution_results: HashMap<BlockHash, ExecutionResult>,
 }
 
+impl Metadata {
+    pub(crate) fn successful_transfers(&self, block_hash: &BlockHash) -> Vec<Transfer> {
+        match self.execution_results.get(block_hash) {
+            Some(exec_result) => exec_result.successful_transfers(),
+            None => {
+                error!(
+                    execution_results = ?self.execution_results,
+                    %block_hash,
+                    "should have exec result"
+                );
+                vec![]
+            }
+        }
+    }
+}
+
 /// Additional information describing a deploy.
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
 pub(crate) enum MetadataExt {
diff --git a/types/src/execution_result.rs b/types/src/execution_result.rs
index cc73d9ec91..2523684012 100644
--- a/types/src/execution_result.rs
+++ b/types/src/execution_result.rs
@@ -179,6 +179,27 @@ pub enum ExecutionResult {
 }
 
 impl ExecutionResult {
+    /// Returns all `Transform::WriteTransfer`s from the execution effects if this is an
+    /// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`.
+    pub fn successful_transfers(&self) -> Vec<Transfer> {
+        let effects = match self {
+            ExecutionResult::Success { effect, .. } => effect,
+            ExecutionResult::Failure { .. } => return vec![],
+        };
+
+        effects
+            .transforms
+            .iter()
+            .filter_map(|transform_entry| {
+                if let Transform::WriteTransfer(transfer) = transform_entry.transform {
+                    Some(transfer)
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
     // This method is not intended to be used by third party crates.
     #[doc(hidden)]
     #[cfg(feature = "json-schema")]

From d94de309203f0880a9fb6e7151fcb4b0188998ae Mon Sep 17 00:00:00 2001
From: Joe Sacher <321623+sacherjj@users.noreply.github.com>
Date: Thu, 7 Sep 2023 08:37:33 -0400
Subject: [PATCH 12/41] Update Cargo.toml back to https

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index 5c77d04195..ec6b18c2dd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -43,4 +43,4 @@ codegen-units = 1
 lto = true
 
 [patch.crates-io]
-parity-wasm = { git = "ssh://git@github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" }
+parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" }

From 37fe95d976633881868e1ae59bb5412a0e350eeb Mon Sep 17 00:00:00 2001
From: sacherjj <321623+sacherjj@users.noreply.github.com>
Date: Thu, 7 Sep 2023 08:55:07 -0400
Subject: [PATCH 13/41] Sync Cargo.lock with casper-wasm

---
 Cargo.lock | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7a51409346..14317b9fd0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3163,7 +3163,7 @@ dependencies = [
 [[package]]
 name = "parity-wasm"
 version = "0.45.0"
-source = "git+ssh://git@github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af"
+source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af"
 
 [[package]]
 name = "parking_lot"

From 2e2276d82346452f49cb99a3af4303a109e804c7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Papierski?=
Date: Thu, 7 Sep 2023 17:35:07 +0200
Subject: [PATCH 14/41] Added regression test for the eviction issue.

---
 .../test_support/src/auction.rs               |   1 +
 .../test_support/src/step_request_builder.rs  |   6 +
 .../test_support/src/wasm_test_builder.rs     |  15 +-
 .../src/test/system_contracts/auction/bids.rs | 235 +++++++++++++++---
 .../system_contracts/auction/distribute.rs    |  24 +-
 5 files changed, 227 insertions(+), 54 deletions(-)

diff --git a/execution_engine_testing/test_support/src/auction.rs b/execution_engine_testing/test_support/src/auction.rs
index 8f1d918290..35c2761d6b 100644
--- a/execution_engine_testing/test_support/src/auction.rs
+++ b/execution_engine_testing/test_support/src/auction.rs
@@ -158,6 +158,7 @@ pub fn run_blocks_with_transfers_and_step(
             .iter()
             .cloned()
             .map(|id| RewardItem::new(id, 1)),
+        Vec::new(),
     );
     builder.commit();
 }
diff --git a/execution_engine_testing/test_support/src/step_request_builder.rs b/execution_engine_testing/test_support/src/step_request_builder.rs
index 194a2f47ca..ad0759d447 100644
--- a/execution_engine_testing/test_support/src/step_request_builder.rs
+++ b/execution_engine_testing/test_support/src/step_request_builder.rs
@@ -60,6 +60,12 @@ impl StepRequestBuilder {
         self
     }
 
+    /// Pushes the given vector of [`EvictItem`] into `evict_items`.
+    pub fn with_evict_items(mut self, evict_items: impl IntoIterator<Item = EvictItem>) -> Self {
+        self.evict_items.extend(evict_items);
+        self
+    }
+
     /// Sets `run_auction`.
     pub fn with_run_auction(mut self, run_auction: bool) -> Self {
         self.run_auction = run_auction;
diff --git a/execution_engine_testing/test_support/src/wasm_test_builder.rs b/execution_engine_testing/test_support/src/wasm_test_builder.rs
index 3b59944760..3c5c80e4bf 100644
--- a/execution_engine_testing/test_support/src/wasm_test_builder.rs
+++ b/execution_engine_testing/test_support/src/wasm_test_builder.rs
@@ -23,7 +23,7 @@ use casper_execution_engine::{
         execute_request::ExecuteRequest,
         execution_result::ExecutionResult,
         run_genesis_request::RunGenesisRequest,
-        step::{StepRequest, StepSuccess},
+        step::{EvictItem, StepRequest, StepSuccess},
         BalanceResult, EngineConfig, EngineState, Error, GenesisSuccess, GetBidsRequest,
         PruneConfig, PruneResult, QueryRequest, QueryResult, RewardItem, StepError,
         SystemContractRegistry, UpgradeConfig, UpgradeSuccess, DEFAULT_MAX_QUERY_DEPTH,
@@ -1376,10 +1376,12 @@ where
         &mut self,
         num_eras: u64,
         reward_items: impl IntoIterator<Item = RewardItem>,
+        evict_items: impl IntoIterator<Item = EvictItem>,
     ) {
         let step_request_builder = StepRequestBuilder::new()
             .with_protocol_version(ProtocolVersion::V1_0_0)
             .with_reward_items(reward_items)
+            .with_evict_items(evict_items)
             .with_run_auction(true);
 
         for _ in 0..num_eras {
@@ -1398,14 +1400,19 @@ where
     pub fn advance_eras_by_default_auction_delay(
         &mut self,
         reward_items: impl IntoIterator<Item = RewardItem>,
+        evict_items: impl IntoIterator<Item = EvictItem>,
     ) {
         let auction_delay = self.get_auction_delay();
-        self.advance_eras_by(auction_delay + 1, reward_items);
+        self.advance_eras_by(auction_delay + 1, reward_items, evict_items);
     }
 
     /// Advances by a single era.
-    pub fn advance_era(&mut self, reward_items: impl IntoIterator<Item = RewardItem>) {
-        self.advance_eras_by(1, reward_items);
+    pub fn advance_era(
+        &mut self,
+        reward_items: impl IntoIterator<Item = RewardItem>,
+        evict_items: impl IntoIterator<Item = EvictItem>,
+    ) {
+        self.advance_eras_by(1, reward_items, evict_items);
     }
 
     /// Returns a trie by hash.
diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
index f0b3bbce12..7e1056cc4b 100644
--- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
+++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
@@ -24,6 +24,7 @@ use casper_execution_engine::{
         },
         genesis::{GenesisAccount, GenesisValidator},
         run_genesis_request::RunGenesisRequest,
+        step::EvictItem,
         EngineConfig, Error, RewardItem,
     },
     execution,
@@ -3228,7 +3229,7 @@ fn should_delegate_and_redelegate() {
         builder.exec(request).commit().expect_success();
     }
 
-    builder.advance_eras_by_default_auction_delay(vec![]);
+    builder.advance_eras_by_default_auction_delay(vec![], vec![]);
 
     let delegator_1_undelegate_purse = builder
         .get_account(*BID_ACCOUNT_1_ADDR)
@@ -3275,7 +3276,7 @@ fn should_delegate_and_redelegate() {
             delegator_1_redelegate_purse_balance
         );
 
-        builder.advance_era(rewards.clone())
+        builder.advance_era(rewards.clone(), vec![]);
     }
 
     // Since a redelegation has been processed no funds should have transferred back to the purse.
@@ -3453,7 +3454,7 @@ fn should_handle_redelegation_to_inactive_validator() { builder.exec(request).commit().expect_success(); } - builder.advance_eras_by_default_auction_delay(vec![]); + builder.advance_eras_by_default_auction_delay(vec![], vec![]); let delegator_1_main_purse = builder .get_account(*DELEGATOR_1_ADDR) @@ -3482,10 +3483,13 @@ fn should_handle_redelegation_to_inactive_validator() { .expect_success() .commit(); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(NON_FOUNDER_VALIDATOR_2_PK.clone(), 1), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(NON_FOUNDER_VALIDATOR_2_PK.clone(), 1), + ], + vec![], + ); let valid_redelegate_request = ExecuteRequestBuilder::standard( *DELEGATOR_2_ADDR, @@ -3516,7 +3520,7 @@ fn should_handle_redelegation_to_inactive_validator() { let delegator_2_purse_balance = builder.get_purse_balance(delegator_2_main_purse); assert_eq!(delegator_2_purse_balance, delegator_2_purse_balance_before); - builder.advance_era(rewards.clone()); + builder.advance_era(rewards.clone(), vec![]); } // The invalid redelegation will force an unbond which will transfer funds to @@ -3609,11 +3613,14 @@ fn should_continue_auction_state_from_release_1_4_x() { let delegator_3_purse_balance_pre_step = builder.get_purse_balance(delegator_3_undelegate_purse); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); let delegator_1_purse_balance_post_step = builder.get_purse_balance(delegator_1_undelegate_purse); @@ -3705,11 +3712,14 @@ fn should_continue_auction_state_from_release_1_4_x() { delegator_4_purse_balance_before ); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); } let delegator_4_purse_balance_after = builder.get_purse_balance(delegator_4_purse); @@ -3794,11 +3804,14 @@ fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { let delegator_1_purse_balance_pre_step = builder.get_purse_balance(delegator_1_undelegate_purse); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); let delegator_1_purse_balance_post_step = builder.get_purse_balance(delegator_1_undelegate_purse); @@ -3816,11 +3829,14 @@ fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { let 
delegator_2_purse_balance_pre_step = builder.get_purse_balance(delegator_2_undelegate_purse); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); let delegator_2_purse_balance_post_step = builder.get_purse_balance(delegator_2_undelegate_purse); @@ -3838,11 +3854,14 @@ fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { let delegator_3_purse_balance_pre_step = builder.get_purse_balance(delegator_3_undelegate_purse); - builder.advance_era(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_era( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); let delegator_3_purse_balance_post_step = builder.get_purse_balance(delegator_3_undelegate_purse); @@ -3912,11 +3931,14 @@ fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { builder.exec(withdraw_request).expect_success().commit(); - builder.advance_eras_by_default_auction_delay(vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]); + builder.advance_eras_by_default_auction_delay( + vec![ + RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), + RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), + ], + vec![], + ); let delegator_4_purse = builder .get_account(*DELEGATOR_2_ADDR) @@ -3937,7 +3959,7 @@ fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { delegator_4_purse_balance_before ); - builder.advance_era(rewards.clone()); + builder.advance_era(rewards.clone(), vec![]); } let delegator_4_purse_balance_after = builder.get_purse_balance(delegator_4_purse); @@ -4470,7 +4492,7 @@ fn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap builder.exec(request).expect_success().commit(); } - builder.advance_eras_by_default_auction_delay(vec![]); + builder.advance_eras_by_default_auction_delay(vec![], vec![]); let delegator_1_main_purse = builder .get_account(*BID_ACCOUNT_1_ADDR) @@ -4517,7 +4539,7 @@ fn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap delegator_1_redelegate_purse_balance ); - builder.advance_era(rewards.clone()) + builder.advance_era(rewards.clone(), vec![]) } let delegator_1_purse_balance_after = builder.get_purse_balance(delegator_1_main_purse); @@ -4528,3 +4550,134 @@ fn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap delegator_1_purse_balance_after ) } + +#[ignore] +#[test] +fn should_delegate_and_redelegat_with_eviction_regression_test() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + ]; + + let mut builder = InMemoryWasmTestBuilder::default(); + + builder.run_genesis(&PRODUCTION_RUN_GENESIS_REQUEST); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + let delegator_1_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone() + }, + ) + .build(); + + builder + .exec(delegator_1_redelegate_request) + .commit() + .expect_success(); + + builder.advance_eras_by(DEFAULT_UNBONDING_DELAY, vec![], vec![]); + + // Advance one more era, this is the point where the redelegate request is processed (era >= + // unbonding_delay + 1) + builder.advance_era( + vec![], + vec![ + // NOTE: This is not the same validator as the one we're redelegating into + EvictItem::new(NON_FOUNDER_VALIDATOR_2_PK.clone()), + ], + ); + + let bids: Bids = builder.get_bids(); + let delegators = bids[&NON_FOUNDER_VALIDATOR_1_PK].delegators(); + assert!(!delegators.contains_key(&BID_ACCOUNT_1_PK)); + + let delegators = bids[&NON_FOUNDER_VALIDATOR_2_PK].delegators(); + assert!(delegators.contains_key(&BID_ACCOUNT_1_PK)); +} diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs index 710af9d9e1..c818311078 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs @@ -4370,7 +4370,7 @@ fn should_not_restake_after_full_unbond() { // advance past the initial auction delay due to special condition of post-genesis behavior. - builder.advance_eras_by_default_auction_delay(vec![]); + builder.advance_eras_by_default_auction_delay(vec![], vec![]); let validator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -4434,7 +4434,7 @@ fn should_not_restake_after_full_unbond() { .expect_success() .commit(); - builder.advance_era(vec![]); + builder.advance_era(vec![], vec![]); let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); @@ -4444,7 +4444,7 @@ fn should_not_restake_after_full_unbond() { U512::from(DELEGATOR_1_STAKE) ); - builder.advance_era(vec![]); + builder.advance_era(vec![], vec![]); // undelegate in the era right after we delegated. undelegate( @@ -4473,11 +4473,14 @@ fn should_not_restake_after_full_unbond() { ); // step until validator receives rewards. - builder.advance_eras_by(2, vec![]); + builder.advance_eras_by(2, vec![], vec![]); // validator receives rewards after this step. - builder.advance_era(vec![RewardItem::new(VALIDATOR_1.clone(), BLOCK_REWARD)]); + builder.advance_era( + vec![RewardItem::new(VALIDATOR_1.clone(), BLOCK_REWARD)], + vec![], + ); // Delegator should not remain delegated even though they were eligible for rewards in the // second era. @@ -4500,7 +4503,7 @@ fn delegator_full_unbond_during_first_reward_era() { builder.run_genesis(&PRODUCTION_RUN_GENESIS_REQUEST); // advance past the initial auction delay due to special condition of post-genesis behavior. - builder.advance_eras_by_default_auction_delay(vec![]); + builder.advance_eras_by_default_auction_delay(vec![], vec![]); let validator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -4565,7 +4568,7 @@ fn delegator_full_unbond_during_first_reward_era() { .commit(); // first step after funding, adding bid and delegating. 
- builder.advance_era(vec![]); + builder.advance_era(vec![], vec![]); let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()) .expect("should be delegator"); @@ -4576,7 +4579,7 @@ fn delegator_full_unbond_during_first_reward_era() { ); // step until validator receives rewards. - builder.advance_eras_by(3, vec![]); + builder.advance_eras_by(3, vec![], vec![]); // assert that the validator should indeed receive rewards and that // the delegator is scheduled to receive rewards this era. @@ -4624,7 +4627,10 @@ fn delegator_full_unbond_during_first_reward_era() { ); // validator receives rewards after this step. - builder.advance_era(vec![RewardItem::new(VALIDATOR_1.clone(), BLOCK_REWARD)]); + builder.advance_era( + vec![RewardItem::new(VALIDATOR_1.clone(), BLOCK_REWARD)], + vec![], + ); // Delegator should not remain delegated even though they were eligible for rewards in the // second era. From 172ce1cf9f87b0ba7b2634c089cc82c7f3aac65c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Thu, 7 Sep 2023 17:36:27 +0200 Subject: [PATCH 15/41] Move the bids read line after. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bartłomiej Kamiński --- execution_engine/src/system/auction.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/execution_engine/src/system/auction.rs b/execution_engine/src/system/auction.rs index 77b30feaa6..07eaf03a83 100644 --- a/execution_engine/src/system/auction.rs +++ b/execution_engine/src/system/auction.rs @@ -426,11 +426,12 @@ pub trait Auction: let auction_delay = detail::get_auction_delay(self)?; let snapshot_size = auction_delay as usize + 1; let mut era_id: EraId = detail::get_era_id(self)?; - let mut bids = detail::get_bids(self)?; // Process unbond requests detail::process_unbond_requests(self, max_delegators_per_validator)?; + let mut bids = detail::get_bids(self)?; + // Process bids let mut bids_modified = false; for (validator_public_key, bid) in bids.iter_mut() { From f1a44955850dd2cd5d34f666c32bb6c5a9920577 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Fri, 8 Sep 2023 11:48:32 +0200 Subject: [PATCH 16/41] Remove extra read. 
--- execution_engine/src/system/auction/detail.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/execution_engine/src/system/auction/detail.rs b/execution_engine/src/system/auction/detail.rs index 5a793aa919..4d29077721 100644 --- a/execution_engine/src/system/auction/detail.rs +++ b/execution_engine/src/system/auction/detail.rs @@ -217,18 +217,13 @@ pub(crate) fn process_unbond_requests( match provider.read_bid(&new_validator.to_account_hash()) { Ok(Some(new_validator_bid)) => { if !new_validator_bid.staked_amount().is_zero() { - let bid = read_bid_for_validator( - provider, - new_validator.clone().to_account_hash(), - )?; - if is_under_max_delegator_cap( max_delegators_per_validator, new_validator_bid.delegators().len(), ) { handle_delegation( provider, - bid, + new_validator_bid, unbonding_purse.unbonder_public_key().clone(), new_validator.clone(), *unbonding_purse.bonding_purse(), From 6c1f6da55f238081c523bffbca2928bc152afeba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Fri, 8 Sep 2023 14:08:25 +0200 Subject: [PATCH 17/41] Update execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bartłomiej Kamiński --- .../tests/src/test/system_contracts/auction/bids.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs index 7e1056cc4b..9497fa1884 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs @@ -4553,7 +4553,7 @@ fn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap #[ignore] #[test] -fn should_delegate_and_redelegat_with_eviction_regression_test() { +fn should_delegate_and_redelegate_with_eviction_regression_test() { let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, From 1d3245167bd6213030a187e57167d5a86e8dcbfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Chabowski?= Date: Thu, 15 Jun 2023 12:42:13 +0200 Subject: [PATCH 18/41] Purge block synchronizer every time we transition out of `CatchUp` --- node/src/reactor/main_reactor/control.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/node/src/reactor/main_reactor/control.rs b/node/src/reactor/main_reactor/control.rs index 9d5ff8e381..f0731d6f84 100644 --- a/node/src/reactor/main_reactor/control.rs +++ b/node/src/reactor/main_reactor/control.rs @@ -96,6 +96,7 @@ impl MainReactor { CatchUpInstruction::CommitGenesis => match self.commit_genesis(effect_builder) { GenesisInstruction::Validator(duration, effects) => { info!("CatchUp: switch to Validate at genesis"); + self.block_synchronizer.purge(); self.state = ReactorState::Validate; (duration, effects) } @@ -112,6 +113,7 @@ impl MainReactor { CatchUpInstruction::CommitUpgrade => match self.commit_upgrade(effect_builder) { Ok(effects) => { info!("CatchUp: switch to Upgrading"); + self.block_synchronizer.purge(); self.state = ReactorState::Upgrading; self.last_progress = Timestamp::now(); self.attempts = 0; @@ -135,8 +137,8 @@ impl MainReactor { return (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()); } // purge to avoid polluting the status endpoints w/ stale state - self.block_synchronizer.purge(); info!("CatchUp: 
switch to KeepUp"); + self.block_synchronizer.purge(); self.state = ReactorState::KeepUp; (Duration::ZERO, Effects::new()) } @@ -166,9 +168,9 @@ impl MainReactor { (Duration::ZERO, Effects::new()) } KeepUpInstruction::Validate(effects) => { + info!("KeepUp: switch to Validate"); // purge to avoid polluting the status endpoints w/ stale state self.block_synchronizer.purge(); - info!("KeepUp: switch to Validate"); self.state = ReactorState::Validate; (Duration::ZERO, effects) } From 5ec5e67a0cbcf57c4cb4f0feff475a63cecb1efd Mon Sep 17 00:00:00 2001 From: Alexandru Sardan Date: Wed, 21 Jun 2023 17:02:17 +0000 Subject: [PATCH 19/41] consensus: don't error when deactivating era more than once Don't return a fatal error when trying to deactivate an era that was already deactivated. In the case where a node unbonds and drops from the validator list, it will switch to `KeepUp` in order to follow the chain. Switching to `KeepUp` will try to deactivate the current era which would have been already deactivated when the last block of the era was added. Print out a debug message to track when this happens. Signed-off-by: Alexandru Sardan --- node/src/components/consensus/era_supervisor.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node/src/components/consensus/era_supervisor.rs b/node/src/components/consensus/era_supervisor.rs index ffddfee663..0437eba47a 100644 --- a/node/src/components/consensus/era_supervisor.rs +++ b/node/src/components/consensus/era_supervisor.rs @@ -860,7 +860,8 @@ impl EraSupervisor { .ok_or_else(|| "attempt to deactivate an era with no eras instantiated!".to_string())?; let era = self.era_mut(which_era); if false == era.consensus.is_active() { - return Err(format!("attempt to deactivate inactive era {}", which_era)); + debug!(era_id=%which_era, "attempt to deactivate inactive era"); + return Ok(which_era); } era.consensus.deactivate_validator(); Ok(which_era) From 0415cb7b03b4433c08def10b07b7ebb3d93af56d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Kami=C5=84ski?= Date: Wed, 13 Sep 2023 12:44:36 +0200 Subject: [PATCH 20/41] Add a failing test --- .../src/test/system_contracts/auction/bids.rs | 163 ++++++++++++++++++ 1 file changed, 163 insertions(+) diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs index 9497fa1884..816cae8853 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs @@ -4681,3 +4681,166 @@ fn should_delegate_and_redelegate_with_eviction_regression_test() { let delegators = bids[&NON_FOUNDER_VALIDATOR_2_PK].delegators(); assert!(delegators.contains_key(&BID_ACCOUNT_1_PK)); } + +#[ignore] +#[test] +fn should_increase_existing_delegation_when_limit_exceeded() { + let engine_config = EngineConfig::new( + DEFAULT_MAX_QUERY_DEPTH, + DEFAULT_MAX_ASSOCIATED_KEYS, + DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, + DEFAULT_MINIMUM_DELEGATION_AMOUNT, + DEFAULT_STRICT_ARGUMENT_CHECKING, + DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS, + Some(2u32), + WasmConfig::default(), + SystemConfig::default(), + ); + + let mut builder = InMemoryWasmTestBuilder::new_with_config(engine_config); + + builder.run_genesis(&PRODUCTION_RUN_GENESIS_REQUEST); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_3 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let post_genesis_request = vec![ + transfer_to_validator_1, + transfer_to_delegator_1, + transfer_to_delegator_2, + transfer_to_delegator_3, + ]; + + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(add_bid_request_1).expect_success().commit(); + + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + builder + .step(step_request) + .expect("must execute step request"); + } + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let delegation_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + let delegation_requests = [delegation_request_1, delegation_request_2]; + + for request in delegation_requests { + builder.exec(request).expect_success().commit(); + } + + let delegation_request_3 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_3).expect_failure(); + + let error = builder.get_error().expect("must get error"); + + assert!(matches!( + error, + Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); + + // The validator already has the maximum number of delegators allowed. However, this is a + // delegator that already delegated, so their bid should just be increased. + let delegation_request_2_repeat = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + builder + .exec(delegation_request_2_repeat) + .expect_success() + .commit(); +} From 8ae817f27ae6a08c61bb52ce6fb6d6aedb5a59f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Kami=C5=84ski?= Date: Wed, 13 Sep 2023 12:47:36 +0200 Subject: [PATCH 21/41] Properly handle existing delegators --- execution_engine/src/system/auction.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/execution_engine/src/system/auction.rs b/execution_engine/src/system/auction.rs index 07eaf03a83..6ae8ada7d5 100644 --- a/execution_engine/src/system/auction.rs +++ b/execution_engine/src/system/auction.rs @@ -227,8 +227,12 @@ pub trait Auction: let bid = detail::read_bid_for_validator(self, validator_account_hash)?; + let delegator_already_exists = bid.delegators().contains_key(&delegator_public_key); + if let Some(max_delegators_per_validator) = max_delegators_per_validator { - if bid.delegators().len() >= max_delegators_per_validator as usize { + if bid.delegators().len() >= max_delegators_per_validator as usize + && !delegator_already_exists + { return Err(Error::ExceededDelegatorSizeLimit.into()); } } From dd52e2341f4245b6f0b3483e3ec7b1f964008754 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Kami=C5=84ski?= Date: Wed, 13 Sep 2023 14:43:51 +0200 Subject: [PATCH 22/41] Add a failing test --- .../components/consensus/protocols/common.rs | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/node/src/components/consensus/protocols/common.rs b/node/src/components/consensus/protocols/common.rs index 4c8e597151..d04cc95e94 100644 --- a/node/src/components/consensus/protocols/common.rs +++ b/node/src/components/consensus/protocols/common.rs @@ -84,6 +84,7 @@ mod tests { use super::*; use crate::components::consensus::ClContext; use casper_types::{testing::TestRng, PublicKey}; + use rand::Rng; #[test] #[should_panic] @@ -95,4 +96,38 @@ mod tests { validators::(&Default::default(), &Default::default(), validator_stakes); } + + #[test] + fn total_weights_less_than_u64_max() { + let mut rng = TestRng::new(); + + let (test_stake_1, test_stake_2) = (rng.gen(), rng.gen()); + + let mut test_stakes = |a: u64, b: u64| -> BTreeMap { + let mut result = BTreeMap::new(); + result.insert( + PublicKey::random(&mut rng), + U512::from(a) * U512::from(u128::MAX), + ); + result.insert( + PublicKey::random(&mut rng), + U512::from(b) * U512::from(u128::MAX), + ); + result + }; + + // First, we test with random values. + let stakes = test_stakes(test_stake_1, test_stake_2); + let weights = validators::(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + + // Then, we test with values that were known to cause issues before. 
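+ // (In both of these cases each stake divides exactly by the computed scaling + // factor, so under the old ceiling-division scaling the weights summed to + // exactly u64::MAX, failing the strict `<` assertions below.)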
+ let stakes = test_stakes(514, 771); + let weights = validators::<ClContext>(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + + let stakes = test_stakes(668, 614); + let weights = validators::<ClContext>(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + } } From 99e8f981ba7ebf2968412ab1892dc38a5d330f99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bart=C5=82omiej=20Kami=C5=84ski?= Date: Wed, 13 Sep 2023 14:45:36 +0200 Subject: [PATCH 23/41] Ensure that the total weight is less than u64::MAX --- node/src/components/consensus/protocols/common.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/node/src/components/consensus/protocols/common.rs b/node/src/components/consensus/protocols/common.rs index d04cc95e94..12212ddd45 100644 --- a/node/src/components/consensus/protocols/common.rs +++ b/node/src/components/consensus/protocols/common.rs @@ -18,13 +18,9 @@ pub(crate) fn validators( validator_stakes: BTreeMap<PublicKey, U512>, ) -> Validators<PublicKey> { let sum_stakes = safe_sum(validator_stakes.values().copied()).expect("should not overflow"); - // We use u64 weights. Scale down by sum / u64::MAX, rounded up. - // If we round up the divisor, the resulting sum is guaranteed to be less than - // u64::MAX. - let scaling_factor: U512 = sum_stakes - .checked_add(U512::from(u64::MAX) - 1) - .expect("should not overflow") - / U512::from(u64::MAX); + // We use u64 weights. Scale down by floor(sum / u64::MAX) + 1. + // This guarantees that the resulting sum is less than u64::MAX. + let scaling_factor: U512 = sum_stakes / U512::from(u64::MAX) + 1; // TODO sort validators by descending weight let mut validators: Validators<PublicKey> = validator_stakes From 610a3f6ee616af5e66406e981c0caace58097900 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Fri, 15 Sep 2023 22:31:04 +0100 Subject: [PATCH 24/41] optimise replay protection function in era supervisor --- .../components/consensus/era_supervisor.rs | 26 ++-- node/src/components/storage.rs | 74 ++++----- node/src/components/storage/tests.rs | 147 ++++++++++++++++-- node/src/effect.rs | 13 +- node/src/effect/requests.rs | 14 +- node/src/types.rs | 2 + node/src/types/block_hash_height_and_era.rs | 44 ++++++ 7 files changed, 244 insertions(+), 76 deletions(-) create mode 100644 node/src/types/block_hash_height_and_era.rs diff --git a/node/src/components/consensus/era_supervisor.rs b/node/src/components/consensus/era_supervisor.rs index 0437eba47a..639259f19b 100644 --- a/node/src/components/consensus/era_supervisor.rs +++ b/node/src/components/consensus/era_supervisor.rs @@ -1364,22 +1364,24 @@ async fn check_deploys_for_replay_in_previous_eras_and_validate_block( where REv: From + From, { - for deploy_hash in proposed_block.value().deploys_and_transfers_iter() { - let block_header = match effect_builder - .get_block_header_for_deploy_from_storage(deploy_hash.into()) - .await - { - None => continue, - Some(header) => header, - }; - // We have found the deploy in the database. If it was from a previous era, it was a - // replay attack. + let deploys_era_ids = effect_builder + .get_deploys_era_ids( + proposed_block + .value() + .deploy_and_transfer_hashes() + .copied() + .collect(), + ) + .await; + + for deploy_era_id in deploys_era_ids { + // If the stored deploy was executed in a previous era, it is a replay attack. 
// - // If not, then it might be this is a deploy for a block we are currently + // If not, then it might be this is a deploy for a block on which we are currently // coming to consensus, and we will rely on the immediate ancestors of the // block_payload within the current era to determine if we are facing a replay // attack. - if block_header.era_id() < proposed_block_era_id { + if deploy_era_id < proposed_block_era_id { return Event::ResolveValidity(ResolveValidity { era_id: proposed_block_era_id, sender, diff --git a/node/src/components/storage.rs b/node/src/components/storage.rs index d4b1ea8d11..01b55a9abf 100644 --- a/node/src/components/storage.rs +++ b/node/src/components/storage.rs @@ -92,10 +92,11 @@ use crate::{ types::{ ApprovalsHash, ApprovalsHashes, AvailableBlockRange, Block, BlockAndDeploys, BlockBody, BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, BlockHash, - BlockHashAndHeight, BlockHeader, BlockHeaderWithMetadata, BlockSignatures, - BlockWithMetadata, Deploy, DeployHash, DeployHeader, DeployId, DeployMetadata, - DeployMetadataExt, DeployWithFinalizedApprovals, FinalitySignature, FinalizedApprovals, - FinalizedBlock, LegacyDeploy, NodeId, SyncLeap, SyncLeapIdentifier, ValueOrChunk, + BlockHashAndHeight, BlockHashHeightAndEra, BlockHeader, BlockHeaderWithMetadata, + BlockSignatures, BlockWithMetadata, Deploy, DeployHash, DeployHeader, DeployId, + DeployMetadata, DeployMetadataExt, DeployWithFinalizedApprovals, FinalitySignature, + FinalizedApprovals, FinalizedBlock, LegacyDeploy, NodeId, SyncLeap, SyncLeapIdentifier, + ValueOrChunk, }, utils::{display_error, WithDir}, NodeRng, @@ -195,8 +196,8 @@ pub struct Storage { block_height_index: BTreeMap, /// A map of era ID to switch block ID. switch_block_era_id_index: BTreeMap, - /// A map of deploy hashes to hashes and heights of blocks containing them. - deploy_hash_index: BTreeMap, + /// A map of deploy hashes to hashes, heights and era IDs of blocks containing them. + deploy_hash_index: BTreeMap, /// Runs of completed blocks known in storage. completed_blocks: DisjointSequences, /// The activation point era of the current protocol version. @@ -470,6 +471,7 @@ impl Storage { block_header.block_hash(), &block_body, block_header.height(), + block_header.era_id(), )?; } } @@ -826,15 +828,12 @@ impl Storage { .respond(self.get_highest_complete_block_header(&mut txn)?) .ignore() } - StorageRequest::GetBlockHeaderForDeploy { - deploy_hash, + StorageRequest::GetDeploysEraIds { + deploy_hashes, responder, - } => { - let mut txn = self.env.begin_ro_txn()?; - responder - .respond(self.get_block_header_by_deploy_hash(&mut txn, deploy_hash)?) - .ignore() - } + } => responder + .respond(self.get_deploys_era_ids(deploy_hashes)) + .ignore(), StorageRequest::GetBlockHeader { block_hash, only_from_available_block_range, @@ -1576,6 +1575,7 @@ impl Storage { *block.hash(), block.body(), block.header().height(), + block.header().era_id(), )?; } Ok(true) @@ -1744,20 +1744,17 @@ impl Storage { ret } - /// Retrieves a single block header by deploy hash by looking it up in the index and returning - /// it. - fn get_block_header_by_deploy_hash( - &self, - txn: &mut Tx, - deploy_hash: DeployHash, - ) -> Result, FatalStorageError> { - self.deploy_hash_index - .get(&deploy_hash) - .and_then(|block_hash_and_height| { - self.get_single_block_header(txn, &block_hash_and_height.block_hash) - .transpose() + /// Returns the era IDs of the blocks in which the given deploys were executed. 
If none of the + /// deploys have been executed yet, an empty set will be returned. + fn get_deploys_era_ids(&self, deploy_hashes: HashSet) -> HashSet { + deploy_hashes + .iter() + .filter_map(|deploy_hash| { + self.deploy_hash_index + .get(deploy_hash) + .map(|block_hash_height_and_era| block_hash_height_and_era.era_id) }) - .transpose() + .collect() } /// Retrieves the block hash and height for a deploy hash by looking it up in the index @@ -1766,7 +1763,10 @@ impl Storage { &self, deploy_hash: DeployHash, ) -> Result, FatalStorageError> { - Ok(self.deploy_hash_index.get(&deploy_hash).copied()) + Ok(self + .deploy_hash_index + .get(&deploy_hash) + .map(BlockHashAndHeight::from)) } /// Retrieves the highest block from storage, if one exists. May return an LMDB error. @@ -2647,27 +2647,29 @@ fn insert_to_block_header_indices( /// /// If a duplicate entry is encountered, index is not updated and an error is returned. fn insert_to_deploy_index( - deploy_hash_index: &mut BTreeMap, + deploy_hash_index: &mut BTreeMap, block_hash: BlockHash, block_body: &BlockBody, block_height: u64, + era_id: EraId, ) -> Result<(), FatalStorageError> { if let Some(hash) = block_body.deploy_and_transfer_hashes().find(|hash| { - deploy_hash_index - .get(hash) - .map_or(false, |old_block_hash_and_height| { - old_block_hash_and_height.block_hash != block_hash - }) + deploy_hash_index.get(hash).map_or(false, |existing_value| { + existing_value.block_hash != block_hash + }) }) { return Err(FatalStorageError::DuplicateDeployIndex { deploy_hash: *hash, - first: deploy_hash_index[hash], + first: BlockHashAndHeight::from(&deploy_hash_index[hash]), second: BlockHashAndHeight::new(block_hash, block_height), }); } for hash in block_body.deploy_and_transfer_hashes() { - deploy_hash_index.insert(*hash, BlockHashAndHeight::new(block_hash, block_height)); + deploy_hash_index.insert( + *hash, + BlockHashHeightAndEra::new(block_hash, block_height, era_id), + ); } Ok(()) diff --git a/node/src/components/storage/tests.rs b/node/src/components/storage/tests.rs index 763b4c30b5..20057490b3 100644 --- a/node/src/components/storage/tests.rs +++ b/node/src/components/storage/tests.rs @@ -1,7 +1,7 @@ //! Unit tests for the storage component. 
use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, fs::{self, File}, iter, rc::Rc, @@ -33,10 +33,10 @@ use crate::{ testing::{ComponentHarness, UnitTestEvent}, types::{ sync_leap_validation_metadata::SyncLeapValidationMetaData, AvailableBlockRange, Block, - BlockHash, BlockHashAndHeight, BlockHeader, BlockHeaderWithMetadata, BlockSignatures, - Chainspec, ChainspecRawBytes, Deploy, DeployHash, DeployMetadata, DeployMetadataExt, - DeployWithFinalizedApprovals, FinalitySignature, LegacyDeploy, SyncLeapIdentifier, - TestBlockBuilder, + BlockHash, BlockHashAndHeight, BlockHashHeightAndEra, BlockHeader, BlockHeaderWithMetadata, + BlockSignatures, Chainspec, ChainspecRawBytes, Deploy, DeployHash, DeployMetadata, + DeployMetadataExt, DeployWithFinalizedApprovals, FinalitySignature, LegacyDeploy, + SyncLeapIdentifier, TestBlockBuilder, }, utils::{Loadable, WithDir}, }; @@ -462,12 +462,12 @@ fn put_deploy( fn insert_to_deploy_index( storage: &mut Storage, - deploy: Deploy, - block_hash_and_height: BlockHashAndHeight, + deploy_hash: &DeployHash, + block_hash_height_and_era: BlockHashHeightAndEra, ) -> bool { storage .deploy_hash_index - .insert(*deploy.hash(), block_hash_and_height) + .insert(*deploy_hash, block_hash_height_and_era) .is_none() } @@ -758,13 +758,13 @@ fn can_retrieve_store_and_load_deploys() { let deploy = Arc::new(Deploy::random(&mut harness.rng)); let was_new = put_deploy(&mut harness, &mut storage, Arc::clone(&deploy)); - let block_hash_and_height = BlockHashAndHeight::random(&mut harness.rng); + let block_hash_height_and_era = BlockHashHeightAndEra::random(&mut harness.rng); // Insert to the deploy hash index as well so that we can perform the GET later. // Also check that we don't have an entry there for this deploy. assert!(insert_to_deploy_index( &mut storage, - (*deploy).clone(), - block_hash_and_height + deploy.hash(), + block_hash_height_and_era )); assert!(was_new, "putting deploy should have returned `true`"); @@ -776,8 +776,8 @@ fn can_retrieve_store_and_load_deploys() { ); assert!(!insert_to_deploy_index( &mut storage, - (*deploy).clone(), - block_hash_and_height + deploy.hash(), + block_hash_height_and_era )); // Retrieve the stored deploy. @@ -802,7 +802,10 @@ fn can_retrieve_store_and_load_deploys() { panic!("We didn't store any metadata but we received it in the response.") } DeployMetadataExt::BlockInfo(recv_block_hash_and_height) => { - assert_eq!(block_hash_and_height, recv_block_hash_and_height) + assert_eq!( + BlockHashAndHeight::from(&block_hash_height_and_era), + recv_block_hash_and_height + ) } DeployMetadataExt::Empty => panic!( "We stored block info in the deploy hash index \ @@ -840,6 +843,122 @@ fn can_retrieve_store_and_load_deploys() { } } +#[test] +fn should_retrieve_deploys_era_ids() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + + // Populate the `deploy_hash_index` with 5 deploys from a block in era 1. + let era_1_deploy_hashes: HashSet = + iter::repeat_with(|| DeployHash::random(&mut harness.rng)) + .take(5) + .collect(); + let block_hash_height_and_era = BlockHashHeightAndEra::new( + BlockHash::random(&mut harness.rng), + harness.rng.gen(), + EraId::new(1), + ); + for deploy_hash in &era_1_deploy_hashes { + assert!(insert_to_deploy_index( + &mut storage, + deploy_hash, + block_hash_height_and_era + )); + } + + // Further populate the `deploy_hash_index` with 5 deploys from a block in era 2. 
+ let era_2_deploy_hashes: HashSet = + iter::repeat_with(|| DeployHash::random(&mut harness.rng)) + .take(5) + .collect(); + let block_hash_height_and_era = BlockHashHeightAndEra::new( + BlockHash::random(&mut harness.rng), + harness.rng.gen(), + EraId::new(2), + ); + for deploy_hash in &era_2_deploy_hashes { + assert!(insert_to_deploy_index( + &mut storage, + deploy_hash, + block_hash_height_and_era + )); + } + + // Check we get an empty set for deploys not yet executed. + let random_deploy_hashes: HashSet = + iter::repeat_with(|| DeployHash::random(&mut harness.rng)) + .take(5) + .collect(); + assert!(storage + .get_deploys_era_ids(random_deploy_hashes.clone()) + .is_empty()); + + // Check we get back only era 1 for all of the era 1 deploys and similarly for era 2 ones. + let era1: HashSet = iter::once(EraId::new(1)).collect(); + assert_eq!( + storage.get_deploys_era_ids(era_1_deploy_hashes.clone()), + era1 + ); + let era2: HashSet = iter::once(EraId::new(2)).collect(); + assert_eq!( + storage.get_deploys_era_ids(era_2_deploy_hashes.clone()), + era2 + ); + + // Check we get back both eras if we use some from each collection. + let both_eras = vec![EraId::new(1), EraId::new(2)].into_iter().collect(); + assert_eq!( + storage.get_deploys_era_ids( + era_1_deploy_hashes + .iter() + .take(3) + .chain(era_2_deploy_hashes.iter().take(3)) + .copied() + .collect(), + ), + both_eras + ); + + // Check we get back only era 1 for era 1 deploys interspersed with unexecuted deploys, and + // similarly for era 2 ones. + assert_eq!( + storage.get_deploys_era_ids( + era_1_deploy_hashes + .iter() + .take(1) + .chain(random_deploy_hashes.iter().take(3)) + .copied() + .collect(), + ), + era1 + ); + assert_eq!( + storage.get_deploys_era_ids( + era_2_deploy_hashes + .iter() + .take(1) + .chain(random_deploy_hashes.iter().take(3)) + .copied() + .collect(), + ), + era2 + ); + + // Check we get back both eras if we use some from each collection and also some unexecuted. + assert_eq!( + storage.get_deploys_era_ids( + era_1_deploy_hashes + .iter() + .take(3) + .chain(era_2_deploy_hashes.iter().take(3)) + .chain(random_deploy_hashes.iter().take(3)) + .copied() + .collect(), + ), + both_eras + ); +} + #[test] fn storing_and_loading_a_lot_of_deploys_does_not_exhaust_handles() { let mut harness = ComponentHarness::default(); diff --git a/node/src/effect.rs b/node/src/effect.rs index 2ad42cd7f9..6c904d4fb5 100644 --- a/node/src/effect.rs +++ b/node/src/effect.rs @@ -1305,17 +1305,18 @@ impl EffectBuilder { .await } - /// Requests the header of the block containing the given deploy. - pub(crate) async fn get_block_header_for_deploy_from_storage( + /// Returns the era IDs of the blocks in which the given deploys were executed. If none of the + /// deploys have been executed yet, an empty set will be returned. + pub(crate) async fn get_deploys_era_ids( self, - deploy_hash: DeployHash, - ) -> Option + deploy_hashes: HashSet, + ) -> HashSet where REv: From, { self.make_request( - |responder| StorageRequest::GetBlockHeaderForDeploy { - deploy_hash, + |responder| StorageRequest::GetDeploysEraIds { + deploy_hashes, responder, }, QueueKind::FromStorage, diff --git a/node/src/effect/requests.rs b/node/src/effect/requests.rs index b454db6afa..7487fcc210 100644 --- a/node/src/effect/requests.rs +++ b/node/src/effect/requests.rs @@ -318,12 +318,10 @@ pub(crate) enum StorageRequest { /// Responder. responder: Responder>, }, - /// Retrieve the header of the block containing the deploy. 
- GetBlockHeaderForDeploy { - /// Hash of the deploy. - deploy_hash: DeployHash, - /// Responder. - responder: Responder>, + /// Retrieve the era IDs of the blocks in which the given deploys were executed. + GetDeploysEraIds { + deploy_hashes: HashSet, + responder: Responder>, }, /// Retrieve block header with given hash. GetBlockHeader { @@ -526,8 +524,8 @@ impl Display for StorageRequest { StorageRequest::GetHighestCompleteBlockHeader { .. } => { write!(formatter, "get highest complete block header") } - StorageRequest::GetBlockHeaderForDeploy { deploy_hash, .. } => { - write!(formatter, "get block header for deploy {}", deploy_hash) + StorageRequest::GetDeploysEraIds { deploy_hashes, .. } => { + write!(formatter, "get era ids for {} deploys", deploy_hashes.len()) } StorageRequest::GetBlockHeader { block_hash, .. } => { write!(formatter, "get {}", block_hash) diff --git a/node/src/types.rs b/node/src/types.rs index a152ed9721..cfff687f48 100644 --- a/node/src/types.rs +++ b/node/src/types.rs @@ -3,6 +3,7 @@ pub(crate) mod appendable_block; mod available_block_range; mod block; +mod block_hash_height_and_era; pub mod chainspec; mod chunkable; mod deploy; @@ -35,6 +36,7 @@ pub use block::{ BlockExecutionResultsOrChunkId, BlockExecutionResultsOrChunkIdDisplay, BlockHash, BlockHeader, BlockSignatures, FinalitySignature, FinalizedBlock, }; +pub(crate) use block_hash_height_and_era::BlockHashHeightAndEra; pub use chainspec::Chainspec; pub(crate) use chainspec::{ActivationPoint, ChainspecRawBytes}; pub use chunkable::Chunkable; diff --git a/node/src/types/block_hash_height_and_era.rs b/node/src/types/block_hash_height_and_era.rs new file mode 100644 index 0000000000..b047c67434 --- /dev/null +++ b/node/src/types/block_hash_height_and_era.rs @@ -0,0 +1,44 @@ +use datasize::DataSize; +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::EraId; + +use crate::types::{BlockHash, BlockHashAndHeight}; + +#[derive(Clone, Copy, Debug, DataSize)] +pub(crate) struct BlockHashHeightAndEra { + pub(crate) block_hash: BlockHash, + pub(crate) block_height: u64, + pub(crate) era_id: EraId, +} + +impl BlockHashHeightAndEra { + pub(crate) fn new(block_hash: BlockHash, block_height: u64, era_id: EraId) -> Self { + BlockHashHeightAndEra { + block_hash, + block_height, + era_id, + } + } + + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + Self { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + era_id: rng.gen(), + } + } +} + +impl From<&BlockHashHeightAndEra> for BlockHashAndHeight { + fn from(value: &BlockHashHeightAndEra) -> Self { + BlockHashAndHeight { + block_hash: value.block_hash, + block_height: value.block_height, + } + } +} From e4fd3e2f12031fe6d16168571c6916dca81b42d3 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Mon, 18 Sep 2023 22:02:05 +0100 Subject: [PATCH 25/41] allow small leeway when deciding if a deploy is future-dated --- node/CHANGELOG.md | 7 +- node/src/cli.rs | 4 +- node/src/components/deploy_acceptor.rs | 9 +- node/src/components/deploy_acceptor/config.rs | 29 +++++++ node/src/components/deploy_acceptor/tests.rs | 44 +++++++++- node/src/components/deploy_buffer.rs | 9 +- node/src/lib.rs | 1 + node/src/reactor/main_reactor.rs | 3 +- node/src/reactor/main_reactor/config.rs | 29 ++++++- node/src/types/chainspec/deploy_config.rs | 8 ++ node/src/utils.rs | 5 ++ resources/local/chainspec.toml.in | 12 +-- resources/local/config.toml | 87 +++++++++++-------- resources/production/chainspec.toml | 12 +-- 
resources/production/config-example.toml | 87 +++++++++++-------- resources/test/valid/0_9_0/chainspec.toml | 1 + .../test/valid/0_9_0_unordered/chainspec.toml | 1 + resources/test/valid/1_0_0/chainspec.toml | 1 + 18 files changed, 251 insertions(+), 98 deletions(-) create mode 100644 node/src/components/deploy_acceptor/config.rs diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index 86b7af33c0..2ef267f486 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -14,16 +14,19 @@ All notable changes to this project will be documented in this file. The format ## Unreleased ### Added +* Add `deploy_acceptor` section to config with a single option `timestamp_leeway` to allow a small leeway when deciding if a deploy is future-dated. +* Add `deploys.max_timestamp_leeway` chainspec option to define the upper limit for the new config option `deploy_acceptor.timestamp_leeway`. ### Changed -* Changed the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255. +* Change the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255. ### Fixed * Fix issue in `chain_get_block_transfers` JSON-RPC where blocks with no deploys could be reported as having `null` transfers rather than `[]`. * Fix issue in `chain_get_block_transfers` JSON-RPC where blocks containing successful transfers could erroneously be reported as having none. ### Removed -* Removed the `block_synchronizer.stall_limit` node config parameter since it is no longer needed. +* Remove the `block_synchronizer.stall_limit` node config parameter since it is no longer needed. + ## 1.5.2 diff --git a/node/src/cli.rs b/node/src/cli.rs index ef3f72423c..5d19612810 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -149,7 +149,7 @@ impl Cli { // Setup UNIX signal hooks. setup_signal_hooks(); - let validator_config = Self::init(&config, config_ext)?; + let mut validator_config = Self::init(&config, config_ext)?; // We use a `ChaCha20Rng` for the production node. For one, we want to completely // eliminate any chance of runtime failures, regardless of how small (these @@ -172,6 +172,8 @@ impl Cli { bail!("invalid chainspec"); } + validator_config.value_mut().ensure_valid(&chainspec); + let network_identity = NetworkIdentity::from_config(WithDir::new( validator_config.dir(), validator_config.value().network.clone(), diff --git a/node/src/components/deploy_acceptor.rs b/node/src/components/deploy_acceptor.rs index 5d72690ef4..faf2c25331 100644 --- a/node/src/components/deploy_acceptor.rs +++ b/node/src/components/deploy_acceptor.rs @@ -1,5 +1,6 @@ #![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged. 
+mod config; mod event; mod metrics; mod tests; @@ -42,6 +43,7 @@ use crate::{ NodeRng, }; +pub(crate) use config::Config; pub(crate) use event::{Event, EventMetadata}; const COMPONENT_NAME: &str = "deploy_acceptor"; @@ -203,6 +205,7 @@ impl ReactorEventT for REv where /// ``` #[derive(Debug, DataSize)] pub struct DeployAcceptor { + acceptor_config: Config, chain_name: String, protocol_version: ProtocolVersion, deploy_config: DeployConfig, @@ -213,10 +216,12 @@ pub struct DeployAcceptor { impl DeployAcceptor { pub(crate) fn new( + acceptor_config: Config, chainspec: &Chainspec, registry: &Registry, ) -> Result { Ok(DeployAcceptor { + acceptor_config, chain_name: chainspec.network_config.name.clone(), protocol_version: chainspec.protocol_version(), deploy_config: chainspec.deploy_config, @@ -242,7 +247,7 @@ impl DeployAcceptor { &self.chain_name, &self.deploy_config, self.max_associated_keys, - verification_start_timestamp, + verification_start_timestamp + self.acceptor_config.timestamp_leeway, ); // checks chainspec values if let Err(error) = acceptable_result { @@ -251,7 +256,7 @@ impl DeployAcceptor { effect_builder, Box::new(EventMetadata::new(deploy, source, maybe_responder)), Error::InvalidDeployConfiguration(error), - verification_start_timestamp, + verification_start_timestamp + self.acceptor_config.timestamp_leeway, ); } diff --git a/node/src/components/deploy_acceptor/config.rs b/node/src/components/deploy_acceptor/config.rs new file mode 100644 index 0000000000..e747ee5b4a --- /dev/null +++ b/node/src/components/deploy_acceptor/config.rs @@ -0,0 +1,29 @@ +use std::str::FromStr; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::TimeDiff; + +const DEFAULT_TIMESTAMP_LEEWAY: &str = "2sec"; + +/// Configuration options for accepting deploys. +#[derive(Copy, Clone, Serialize, Deserialize, Debug, DataSize)] +pub struct Config { + /// The leeway allowed when considering whether a deploy is future-dated or not. + /// + /// To accommodate minor clock drift, deploys whose timestamps are within `timestamp_leeway` in + /// the future are still acceptable. + /// + /// The maximum value to which `timestamp_leeway` can be set is defined by the chainspec + /// setting `deploys.max_timestamp_leeway`. 
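+ /// + /// Defaults to 2 seconds (`DEFAULT_TIMESTAMP_LEEWAY`).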
+ pub timestamp_leeway: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + timestamp_leeway: TimeDiff::from_str(DEFAULT_TIMESTAMP_LEEWAY).unwrap(), + } + } +} diff --git a/node/src/components/deploy_acceptor/tests.rs b/node/src/components/deploy_acceptor/tests.rs index 9ac7d780d0..bac91585e0 100644 --- a/node/src/components/deploy_acceptor/tests.rs +++ b/node/src/components/deploy_acceptor/tests.rs @@ -26,7 +26,7 @@ use casper_execution_engine::{ use casper_types::{ account::{Account, ActionThresholds, AssociatedKeys, Weight}, testing::TestRng, - CLValue, EraId, StoredValue, URef, U512, + CLValue, EraId, StoredValue, TimeDiff, URef, U512, }; use super::*; @@ -162,6 +162,8 @@ enum TestScenario { FromPeerSessionContract(ContractScenario), FromPeerSessionContractPackage(ContractPackageScenario), FromClientInvalidDeploy, + FromClientSlightlyFutureDatedDeploy, + FromClientFutureDatedDeploy, FromClientMissingAccount, FromClientInsufficientBalance, FromClientValidDeploy, @@ -203,6 +205,8 @@ impl TestScenario { Source::Peer(NodeId::random(rng)) } TestScenario::FromClientInvalidDeploy + | TestScenario::FromClientSlightlyFutureDatedDeploy + | TestScenario::FromClientFutureDatedDeploy | TestScenario::FromClientMissingAccount | TestScenario::FromClientInsufficientBalance | TestScenario::FromClientValidDeploy @@ -328,6 +332,18 @@ impl TestScenario { | TestScenario::ShouldNotAcceptExpiredDeploySentByClient => { Deploy::random_expired_deploy(rng) } + TestScenario::FromClientSlightlyFutureDatedDeploy => { + let timestamp = Timestamp::now() + (Config::default().timestamp_leeway / 2); + let ttl = TimeDiff::from_seconds(300); + Deploy::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl) + } + TestScenario::FromClientFutureDatedDeploy => { + let timestamp = Timestamp::now() + + Config::default().timestamp_leeway + + TimeDiff::from_millis(100); + let ttl = TimeDiff::from_seconds(300); + Deploy::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl) + } } } @@ -340,11 +356,13 @@ impl TestScenario { | TestScenario::FromPeerAccountWithInvalidAssociatedKeys // account check skipped if from peer | TestScenario::FromClientRepeatedValidDeploy | TestScenario::FromClientValidDeploy + | TestScenario::FromClientSlightlyFutureDatedDeploy | TestScenario::ShouldAcceptExpiredDeploySentByPeer=> true, TestScenario::FromPeerInvalidDeploy | TestScenario::FromClientInsufficientBalance | TestScenario::FromClientMissingAccount | TestScenario::FromClientInvalidDeploy + | TestScenario::FromClientFutureDatedDeploy | TestScenario::FromClientAccountWithInsufficientWeight | TestScenario::FromClientAccountWithInvalidAssociatedKeys | TestScenario::AccountWithUnknownBalance @@ -436,7 +454,8 @@ impl reactor::Reactor for Reactor { let (storage_config, storage_tempdir) = storage::Config::default_for_tests(); let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config); - let deploy_acceptor = DeployAcceptor::new(chainspec.as_ref(), registry).unwrap(); + let deploy_acceptor = + DeployAcceptor::new(Config::default(), chainspec.as_ref(), registry).unwrap(); let storage = Storage::new( &storage_withdir, @@ -805,6 +824,7 @@ async fn run_deploy_acceptor_without_timeout( // Check that invalid deploys sent by a client raise the `InvalidDeploy` announcement // with the appropriate source. 
TestScenario::FromClientInvalidDeploy + | TestScenario::FromClientFutureDatedDeploy | TestScenario::FromClientMissingAccount | TestScenario::FromClientInsufficientBalance | TestScenario::FromClientAccountWithInvalidAssociatedKeys @@ -910,7 +930,8 @@ async fn run_deploy_acceptor_without_timeout( } // Check that a, new and valid, deploy sent by a client raises an `AcceptedNewDeploy` // announcement with the appropriate source. - TestScenario::FromClientValidDeploy => { + TestScenario::FromClientValidDeploy + | TestScenario::FromClientSlightlyFutureDatedDeploy => { matches!( event, Event::DeployAcceptorAnnouncement( @@ -1026,6 +1047,23 @@ async fn should_reject_invalid_deploy_from_client() { )) } +#[tokio::test] +async fn should_accept_slightly_future_dated_deploy_from_client() { + let result = run_deploy_acceptor(TestScenario::FromClientSlightlyFutureDatedDeploy).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_future_dated_deploy_from_client() { + let result = run_deploy_acceptor(TestScenario::FromClientFutureDatedDeploy).await; + assert!(matches!( + result, + Err(super::Error::InvalidDeployConfiguration( + DeployConfigurationFailure::TimestampInFuture { .. } + )) + )) +} + #[tokio::test] async fn should_reject_valid_deploy_from_client_for_missing_account() { let result = run_deploy_acceptor(TestScenario::FromClientMissingAccount).await; diff --git a/node/src/components/deploy_buffer.rs b/node/src/components/deploy_buffer.rs index ff13431a54..713e2dbd79 100644 --- a/node/src/components/deploy_buffer.rs +++ b/node/src/components/deploy_buffer.rs @@ -365,9 +365,12 @@ impl DeployBuffer { self.dead.insert(deploy_hash); } AddError::InvalidDeploy => { - // it should not be possible for an invalid deploy to get buffered - // in the first place, thus this should be unreachable - error!( + // It should not generally be possible for an invalid deploy to get + // buffered in the first place, thus this should be unreachable. There + // is a small potential for a slightly future-dated deploy to be + // accepted (if within `timestamp_leeway`) and still be future-dated by + // the time we try and add it to a proposed block here. 
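+ // That possibility is why this now logs a warning rather than an error.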
+ warn!( ?deploy_hash, "DeployBuffer: invalid deploy in deploy buffer" ); diff --git a/node/src/lib.rs b/node/src/lib.rs index 2d2d5e53d3..c1478cb532 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -56,6 +56,7 @@ pub(crate) use components::{ block_synchronizer::Config as BlockSynchronizerConfig, consensus::Config as ConsensusConfig, contract_runtime::Config as ContractRuntimeConfig, + deploy_acceptor::Config as DeployAcceptorConfig, deploy_buffer::Config as DeployBufferConfig, diagnostics_port::Config as DiagnosticsPortConfig, event_stream_server::Config as EventStreamServerConfig, diff --git a/node/src/reactor/main_reactor.rs b/node/src/reactor/main_reactor.rs index 55f25dcaa2..d8c03a138a 100644 --- a/node/src/reactor/main_reactor.rs +++ b/node/src/reactor/main_reactor.rs @@ -1145,7 +1145,8 @@ impl reactor::Reactor for MainReactor { let block_validator = BlockValidator::new(Arc::clone(&chainspec)); let upgrade_watcher = UpgradeWatcher::new(chainspec.as_ref(), config.upgrade_watcher, &root_dir)?; - let deploy_acceptor = DeployAcceptor::new(chainspec.as_ref(), registry)?; + let deploy_acceptor = + DeployAcceptor::new(config.deploy_acceptor, chainspec.as_ref(), registry)?; let deploy_buffer = DeployBuffer::new(chainspec.deploy_config, config.deploy_buffer, registry)?; diff --git a/node/src/reactor/main_reactor/config.rs b/node/src/reactor/main_reactor/config.rs index b921c9fe54..584571e298 100644 --- a/node/src/reactor/main_reactor/config.rs +++ b/node/src/reactor/main_reactor/config.rs @@ -1,11 +1,14 @@ use datasize::DataSize; use serde::{Deserialize, Serialize}; +use tracing::error; use crate::{ - logging::LoggingConfig, types::NodeConfig, BlockAccumulatorConfig, BlockSynchronizerConfig, - ConsensusConfig, ContractRuntimeConfig, DeployBufferConfig, DiagnosticsPortConfig, - EventStreamServerConfig, FetcherConfig, GossipConfig, NetworkConfig, RestServerConfig, - RpcServerConfig, SpeculativeExecConfig, StorageConfig, UpgradeWatcherConfig, + logging::LoggingConfig, + types::{Chainspec, NodeConfig}, + BlockAccumulatorConfig, BlockSynchronizerConfig, ConsensusConfig, ContractRuntimeConfig, + DeployAcceptorConfig, DeployBufferConfig, DiagnosticsPortConfig, EventStreamServerConfig, + FetcherConfig, GossipConfig, NetworkConfig, RestServerConfig, RpcServerConfig, + SpeculativeExecConfig, StorageConfig, UpgradeWatcherConfig, }; /// Root configuration. @@ -37,6 +40,8 @@ pub struct Config { pub fetcher: FetcherConfig, /// Config values for the contract runtime. pub contract_runtime: ContractRuntimeConfig, + /// Config values for the deploy acceptor. + pub deploy_acceptor: DeployAcceptorConfig, /// Config values for the deploy buffer. pub deploy_buffer: DeployBufferConfig, /// Config values for the diagnostics port. @@ -48,3 +53,19 @@ pub struct Config { /// Config values for the upgrade watcher. pub upgrade_watcher: UpgradeWatcherConfig, } + +impl Config { + /// This modifies `self` so that all configured options are within the bounds set in the + /// provided chainspec. 
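+ /// + /// Currently the only bounded option is `deploy_acceptor.timestamp_leeway`, which is clamped + /// to the chainspec's `deploy_config.max_timestamp_leeway`; an error is logged if the + /// configured value exceeds that limit.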
+ pub(crate) fn ensure_valid(&mut self, chainspec: &Chainspec) { + if self.deploy_acceptor.timestamp_leeway > chainspec.deploy_config.max_timestamp_leeway { + error!( + configured_timestamp_leeway = %self.deploy_acceptor.timestamp_leeway, + max_timestamp_leeway = %chainspec.deploy_config.max_timestamp_leeway, + "setting value for 'deploy_acceptor.timestamp_leeway' to maximum permitted by \ + chainspec 'deploy_config.max_timestamp_leeway'", + ); + self.deploy_acceptor.timestamp_leeway = chainspec.deploy_config.max_timestamp_leeway; + } + } +} diff --git a/node/src/types/chainspec/deploy_config.rs b/node/src/types/chainspec/deploy_config.rs index 6564babf87..07f9e05b9b 100644 --- a/node/src/types/chainspec/deploy_config.rs +++ b/node/src/types/chainspec/deploy_config.rs @@ -34,6 +34,7 @@ pub struct DeployConfig { pub(crate) payment_args_max_length: u32, pub(crate) session_args_max_length: u32, pub(crate) native_transfer_minimum_motes: u64, + pub(crate) max_timestamp_leeway: TimeDiff, } impl DeployConfig { @@ -69,6 +70,7 @@ impl DeployConfig { let session_args_max_length = rng.gen(); let native_transfer_minimum_motes = rng.gen_range(MAX_PAYMENT_AMOUNT..1_000_000_000_000_000); + let max_timestamp_leeway = TimeDiff::from_seconds(rng.gen_range(0..6)); DeployConfig { max_payment_cost, @@ -83,6 +85,7 @@ impl DeployConfig { payment_args_max_length, session_args_max_length, native_transfer_minimum_motes, + max_timestamp_leeway, } } } @@ -103,6 +106,7 @@ impl Default for DeployConfig { payment_args_max_length: 1024, session_args_max_length: 1024, native_transfer_minimum_motes: MAX_PAYMENT_AMOUNT, + max_timestamp_leeway: TimeDiff::from_str("5sec").unwrap(), } } } @@ -122,6 +126,7 @@ impl ToBytes for DeployConfig { buffer.extend(self.payment_args_max_length.to_bytes()?); buffer.extend(self.session_args_max_length.to_bytes()?); buffer.extend(self.native_transfer_minimum_motes.to_bytes()?); + buffer.extend(self.max_timestamp_leeway.to_bytes()?); Ok(buffer) } @@ -138,6 +143,7 @@ impl ToBytes for DeployConfig { + self.payment_args_max_length.serialized_length() + self.session_args_max_length.serialized_length() + self.native_transfer_minimum_motes.serialized_length() + + self.max_timestamp_leeway.serialized_length() } } @@ -156,6 +162,7 @@ impl FromBytes for DeployConfig { let (payment_args_max_length, remainder) = u32::from_bytes(remainder)?; let (session_args_max_length, remainder) = u32::from_bytes(remainder)?; let (native_transfer_minimum_motes, remainder) = u64::from_bytes(remainder)?; + let (max_timestamp_leeway, remainder) = TimeDiff::from_bytes(remainder)?; let config = DeployConfig { max_payment_cost, max_ttl, @@ -169,6 +176,7 @@ impl FromBytes for DeployConfig { payment_args_max_length, session_args_max_length, native_transfer_minimum_motes, + max_timestamp_leeway, }; Ok((config, remainder)) } diff --git a/node/src/utils.rs b/node/src/utils.rs index 7ca3085f04..07e4c5f379 100644 --- a/node/src/utils.rs +++ b/node/src/utils.rs @@ -270,6 +270,11 @@ impl WithDir { &self.value } + /// Get a mutable reference to the inner value. + pub fn value_mut(&mut self) -> &mut T { + &mut self.value + } + /// Adds `self.dir` as a parent if `path` is relative, otherwise returns `path` unchanged. 
pub fn with_dir(&self, path: PathBuf) -> PathBuf { if path.is_relative() { diff --git a/resources/local/chainspec.toml.in b/resources/local/chainspec.toml.in index 09c940af7b..d0822a4f1b 100644 --- a/resources/local/chainspec.toml.in +++ b/resources/local/chainspec.toml.in @@ -24,12 +24,12 @@ maximum_net_message_size = 25_165_824 [core] # Era duration. -era_duration = '41seconds' +era_duration = '41 seconds' # Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the # minimum height. minimum_era_height = 5 # Minimum difference between a block's and its child's timestamp. -minimum_block_time = '4096ms' +minimum_block_time = '4096 ms' # Number of slots available in validator auction. validator_slots = 7 # A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. @@ -48,7 +48,7 @@ legacy_required_finality = 'Strict' # you will be a validator in era N + auction_delay + 1. auction_delay = 1 # The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' +locked_funds_period = '90 days' # The period in which genesis validator's bid is released over time after it's unlocked. vesting_schedule_period = '13 weeks' # Default number of eras that need to pass to be able to withdraw unbonded funds. @@ -76,7 +76,7 @@ max_delegators_per_validator = 0 [highway] # Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. -maximum_round_length = '525seconds' +maximum_round_length = '525 seconds' # The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. # Expressed as a fraction (1/5 by default). reduced_reward_multiplier = [1, 5] @@ -85,7 +85,7 @@ reduced_reward_multiplier = [1, 5] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. max_payment_cost = '0' # The duration after the deploy timestamp that it can be included in a block. -max_ttl = '18hours' +max_ttl = '18 hours' # The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). max_dependencies = 10 # Maximum block size in bytes including deploys contained by the block. 0 means unlimited. @@ -106,6 +106,8 @@ payment_args_max_length = 1024 session_args_max_length = 1024 # The minimum amount in motes for a valid native transfer. native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `deploy_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' [wasm] # Amount of free memory (in 64kB pages) each contract can use for stack. diff --git a/resources/local/config.toml b/resources/local/config.toml index fb889b7de5..d63b52b594 100644 --- a/resources/local/config.toml +++ b/resources/local/config.toml @@ -10,23 +10,23 @@ sync_to_genesis = true # Idle time after which the syncing process is considered stalled. -idle_tolerance = '20min' +idle_tolerance = '20 minutes' # When the syncing process is considered stalled, it'll be retried up to `max_attempts` times. max_attempts = 3 # Default delay for the control events that have no dedicated delay requirements. -control_logic_default_delay = '1sec' +control_logic_default_delay = '1 second' # Flag which forces the node to resync all of the blocks. force_resync = false # A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all # conditions are satisfied. 
-shutdown_for_upgrade_timeout = '2min' +shutdown_for_upgrade_timeout = '2 minutes' # Maximum time a node will wait for an upgrade to commit. -upgrade_timeout = '30sec' +upgrade_timeout = '30 seconds' # ================================= @@ -64,16 +64,16 @@ max_execution_delay = 3 [consensus.zug] # Request the latest protocol state from a random peer periodically, with this interval. -# '0sec' means it is disabled and we never request the protocol state from a peer. -sync_state_interval = '50ms' +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '50 ms' # Log inactive or faulty validators periodically, with this interval. -# '0sec' means it is disabled and we never print the log message. -log_participation_interval = '1min' +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' # The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of # echo messages, before they vote to make the round skippable and move on to the next proposer. -proposal_timeout = '10sec' +proposal_timeout = '10 seconds' # The additional proposal delay that is still considered fast enough, in percent. This should # take into account variables like empty vs. full blocks, network traffic etc. @@ -88,7 +88,7 @@ proposal_timeout_inertia = 10 # The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp # lies in the future by more than that are rejected. -clock_tolerance = '1sec' +clock_tolerance = '1 second' # =========================================== @@ -97,19 +97,19 @@ clock_tolerance = '1sec' [consensus.highway] # The duration for which incoming vertices with missing dependencies should be kept in a queue. -pending_vertex_timeout = '1min' +pending_vertex_timeout = '1 minute' # Request the latest protocol state from a random peer periodically, with this interval. -# '0sec' means it is disabled and we never request the protocol state from a peer. -request_state_interval = '20sec' +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' # Log inactive or faulty validators periodically, with this interval. -# '0sec' means it is disabled and we never print the log message. -log_participation_interval = '15sec' +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '15 seconds' # Log the synchronizer state periodically, with this interval. -# '0sec' means it is disabled and we never print the log message. -log_synchronizer_interval = '5sec' +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' # Log the size of every incoming and outgoing serialized unit. log_unit_sizes = false @@ -174,19 +174,19 @@ known_addresses = ['127.0.0.1:34553'] min_peers_for_initialization = 3 # The interval between each fresh round of gossiping the node's public address. -gossip_interval = '30sec' +gossip_interval = '30 seconds' # Initial delay for starting address gossipping after the network starts. This should be slightly # more than the expected time required for initial connections to complete. -initial_gossip_delay = '5sec' +initial_gossip_delay = '5 seconds' # How long a connection is allowed to be stuck as pending before it is abandoned. 
-max_addr_pending_time = '1min' +max_addr_pending_time = '1 minute' # Maximum time allowed for a connection handshake between two nodes to be completed. Connections # exceeding this threshold are considered unlikely to be healthy or even malicious and thus # terminated. -handshake_timeout = '20sec' +handshake_timeout = '20 seconds' # Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional # connections will be rejected. A value of `0` means unlimited. @@ -216,7 +216,7 @@ max_in_flight_demands = 50 tarpit_version_threshold = '1.2.1' # How long to hold connections to trapped legacy nodes. -tarpit_duration = '10min' +tarpit_duration = '10 minutes' # The probability [0.0, 1.0] of this node trapping a legacy node. # @@ -227,7 +227,7 @@ tarpit_duration = '10min' tarpit_chance = 0.2 # How long peers remain blocked after they get blocklisted. -blocklist_retain_duration = '1min' +blocklist_retain_duration = '1 minute' # Identity of a node # @@ -449,20 +449,20 @@ saturation_limit_percent = 80 # # The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, # the longer they are retained, the larger the list of finished entries can grow. -finished_entry_duration = '1min' +finished_entry_duration = '1 minute' # The timeout duration for a single gossip request, i.e. for a single gossip message # sent from this node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. -gossip_request_timeout = '10sec' +gossip_request_timeout = '10 seconds' # The timeout duration for retrieving the remaining part(s) of newly-discovered data # from a peer which gossiped information about that data to this node. -get_remainder_timeout = '5sec' +get_remainder_timeout = '5 seconds' # The timeout duration for a newly-received, gossiped item to be validated and stored by another # component before the gossiper abandons waiting to gossip the item onwards. -validate_and_store_timeout = '1min' +validate_and_store_timeout = '1 minute' # =============================================== @@ -474,10 +474,10 @@ validate_and_store_timeout = '1min' attempt_execution_threshold = 3 # Accepted time interval for inactivity in block accumulator. -dead_air_interval = '3min' +dead_air_interval = '3 minutes' # Time after which the block acceptors are considered old and can be purged. -purge_interval = '5min' +purge_interval = '5 minutes' # ================================================ @@ -489,16 +489,16 @@ purge_interval = '5min' max_parallel_trie_fetches = 5000 # Time interval for the node to ask for refreshed peers. -peer_refresh_interval = '90sec' +peer_refresh_interval = '90 seconds' # Time interval for the node to check what the block synchronizer needs to acquire next. -need_next_interval = '1sec' +need_next_interval = '1 second' # Time interval for recurring disconnection of dishonest peers. -disconnect_dishonest_peers_interval = '10sec' +disconnect_dishonest_peers_interval = '10 seconds' # Time interval for resetting the latch in block builders. -latch_reset_interval = '5sec' +latch_reset_interval = '5 seconds' # ================================== @@ -509,7 +509,7 @@ latch_reset_interval = '5sec' # The timeout duration for a single fetcher request, i.e. for a single fetcher message # sent from this node to another node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. 
-get_from_peer_timeout = '10sec' +get_from_peer_timeout = '10 seconds' # ======================================================== @@ -535,13 +535,28 @@ max_query_depth = 5 enable_manual_sync = true +# ============================================= +# Configuration options for the deploy acceptor +# ============================================= +[deploy_acceptor] + +# The leeway allowed when considering whether a deploy is future-dated or not. +# +# To accommodate minor clock drift, deploys whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `deploys.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + # =========================================== # Configuration options for the deploy buffer # =========================================== [deploy_buffer] # The interval of checking for expired deploys. -expiry_check_interval = '1min' +expiry_check_interval = '1 minute' # ============================================== @@ -567,4 +582,4 @@ socket_umask = 0o077 [upgrade_watcher] # How often to scan file system for available upgrades. -upgrade_check_interval = '30sec' +upgrade_check_interval = '30 seconds' diff --git a/resources/production/chainspec.toml b/resources/production/chainspec.toml index 1925fe1df1..5847af59a8 100644 --- a/resources/production/chainspec.toml +++ b/resources/production/chainspec.toml @@ -24,12 +24,12 @@ maximum_net_message_size = 25_165_824 [core] # Era duration. -era_duration = '120min' +era_duration = '120 minutes' # Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the # minimum height. minimum_era_height = 20 # Minimum difference between a block's and its child's timestamp. -minimum_block_time = '32768ms' +minimum_block_time = '32768 ms' # Number of slots available in validator auction. validator_slots = 100 # A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. @@ -48,7 +48,7 @@ legacy_required_finality = 'Strict' # you will be a validator in era N + auction_delay + 1. auction_delay = 1 # The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' +locked_funds_period = '90 days' # The period in which genesis validator's bid is released over time after it's unlocked. vesting_schedule_period = '13 weeks' # Default number of eras that need to pass to be able to withdraw unbonded funds. @@ -83,7 +83,7 @@ max_delegators_per_validator = 1200 [highway] # Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. -maximum_round_length = '132seconds' +maximum_round_length = '132 seconds' # The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. # Expressed as a fraction (1/5 by default). reduced_reward_multiplier = [1, 5] @@ -92,7 +92,7 @@ reduced_reward_multiplier = [1, 5] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. max_payment_cost = '0' # The duration after the deploy timestamp that it can be included in a block. -max_ttl = '18hours' +max_ttl = '18 hours' # The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). max_dependencies = 10 # Maximum block size in bytes including deploys contained by the block. 0 means unlimited. 
@@ -113,6 +113,8 @@ payment_args_max_length = 1024 session_args_max_length = 1024 # The minimum amount in motes for a valid native transfer. native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `deploy_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' [wasm] # Amount of free memory (in 64kB pages) each contract can use for stack. diff --git a/resources/production/config-example.toml b/resources/production/config-example.toml index 701d67fc75..093471b365 100644 --- a/resources/production/config-example.toml +++ b/resources/production/config-example.toml @@ -10,23 +10,23 @@ sync_to_genesis = true # Idle time after which the syncing process is considered stalled. -idle_tolerance = '20min' +idle_tolerance = '20 minutes' # When the syncing process is considered stalled, it'll be retried up to `max_attempts` times. max_attempts = 3 # Default delay for the control events that have no dedicated delay requirements. -control_logic_default_delay = '1sec' +control_logic_default_delay = '1 second' # Flag which forces the node to resync all of the blocks. force_resync = false # A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all # conditions are satisfied. -shutdown_for_upgrade_timeout = '2min' +shutdown_for_upgrade_timeout = '2 minutes' # Maximum time a node will wait for an upgrade to commit. -upgrade_timeout = '30sec' +upgrade_timeout = '30 seconds' # ================================= @@ -64,16 +64,16 @@ max_execution_delay = 3 [consensus.zug] # Request the latest protocol state from a random peer periodically, with this interval. -# '0sec' means it is disabled and we never request the protocol state from a peer. -sync_state_interval = '1sec' +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' # Log inactive or faulty validators periodically, with this interval. -# '0sec' means it is disabled and we never print the log message. -log_participation_interval = '1min' +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' # The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of # echo messages, before they vote to make the round skippable and move on to the next proposer. -proposal_timeout = '10sec' +proposal_timeout = '10 seconds' # The additional proposal delay that is still considered fast enough, in percent. This should # take into account variables like empty vs. full blocks, network traffic etc. @@ -88,7 +88,7 @@ proposal_timeout_inertia = 10 # The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp # lies in the future by more than that are rejected. -clock_tolerance = '1sec' +clock_tolerance = '1 second' # =========================================== @@ -97,19 +97,19 @@ clock_tolerance = '1sec' [consensus.highway] # The duration for which incoming vertices with missing dependencies should be kept in a queue. -pending_vertex_timeout = '30min' +pending_vertex_timeout = '30 minutes' # Request the latest protocol state from a random peer periodically, with this interval. -# '0sec' means it is disabled and we never request the protocol state from a peer. -request_state_interval = '20sec' +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' # Log inactive or faulty validators periodically, with this interval. 
-# '0sec' means it is disabled and we never print the log message.
+# '0 seconds' means it is disabled and we never print the log message.
-log_participation_interval = '1min'
+log_participation_interval = '1 minute'

 # Log the synchronizer state periodically, with this interval.
-# '0sec' means it is disabled and we never print the log message.
-log_synchronizer_interval = '5sec'
+# '0 seconds' means it is disabled and we never print the log message.
+log_synchronizer_interval = '5 seconds'

 # Log the size of every incoming and outgoing serialized unit.
 log_unit_sizes = false

@@ -174,19 +174,19 @@ known_addresses = ['168.119.137.143:35000','47.251.14.254:35000','47.242.53.164:
 min_peers_for_initialization = 3

 # The interval between each fresh round of gossiping the node's public address.
-gossip_interval = '120sec'
+gossip_interval = '120 seconds'

 # Initial delay for starting address gossiping after the network starts. This should be slightly
 # more than the expected time required for initial connections to complete.
-initial_gossip_delay = '5sec'
+initial_gossip_delay = '5 seconds'

 # How long a connection is allowed to be stuck as pending before it is abandoned.
-max_addr_pending_time = '1min'
+max_addr_pending_time = '1 minute'

 # Maximum time allowed for a connection handshake between two nodes to be completed. Connections
 # exceeding this threshold are considered unlikely to be healthy or even malicious and thus
 # terminated.
-handshake_timeout = '20sec'
+handshake_timeout = '20 seconds'

 # Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional
 # connections will be rejected. A value of `0` means unlimited.

@@ -216,7 +216,7 @@ max_in_flight_demands = 50
 tarpit_version_threshold = '1.2.1'

 # How long to hold connections to trapped legacy nodes.
-tarpit_duration = '10min'
+tarpit_duration = '10 minutes'

 # The probability [0.0, 1.0] of this node trapping a legacy node.
 #
@@ -227,7 +227,7 @@ tarpit_duration = '10min'
 tarpit_chance = 0.2

 # How long peers remain blocked after they get blocklisted.
-blocklist_retain_duration = '10min'
+blocklist_retain_duration = '10 minutes'

 # Identity of a node
 #
@@ -449,20 +449,20 @@ saturation_limit_percent = 80
 #
 # The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However,
 # the longer they are retained, the larger the list of finished entries can grow.
-finished_entry_duration = '1min'
+finished_entry_duration = '1 minute'

 # The timeout duration for a single gossip request, i.e. for a single gossip message
 # sent from this node, it will be considered timed out if the expected response from that peer is
 # not received within this specified duration.
-gossip_request_timeout = '30sec'
+gossip_request_timeout = '30 seconds'

 # The timeout duration for retrieving the remaining part(s) of newly-discovered data
 # from a peer which gossiped information about that data to this node.
-get_remainder_timeout = '5sec'
+get_remainder_timeout = '5 seconds'

 # The timeout duration for a newly-received, gossiped item to be validated and stored by another
 # component before the gossiper abandons waiting to gossip the item onwards.
-validate_and_store_timeout = '1min'
+validate_and_store_timeout = '1 minute'

 # ===============================================
@@ -474,10 +474,10 @@ validate_and_store_timeout = '1min'
 attempt_execution_threshold = 3

 # Accepted time interval for inactivity in block accumulator.
-dead_air_interval = '3min' +dead_air_interval = '3 minutes' # Time after which the block acceptors are considered old and can be purged. -purge_interval = '1min' +purge_interval = '1 minute' # ================================================ @@ -489,16 +489,16 @@ purge_interval = '1min' max_parallel_trie_fetches = 5000 # Time interval for the node to ask for refreshed peers. -peer_refresh_interval = '90sec' +peer_refresh_interval = '90 seconds' # Time interval for the node to check what the block synchronizer needs to acquire next. -need_next_interval = '1sec' +need_next_interval = '1 second' # Time interval for recurring disconnection of dishonest peers. -disconnect_dishonest_peers_interval = '10sec' +disconnect_dishonest_peers_interval = '10 seconds' # Time interval for resetting the latch in block builders. -latch_reset_interval = '5sec' +latch_reset_interval = '5 seconds' # ================================== @@ -509,7 +509,7 @@ latch_reset_interval = '5sec' # The timeout duration for a single fetcher request, i.e. for a single fetcher message # sent from this node to another node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. -get_from_peer_timeout = '10sec' +get_from_peer_timeout = '10 seconds' # ======================================================== @@ -535,13 +535,28 @@ max_global_state_size = 2_089_072_132_096 #enable_manual_sync = true +# ============================================= +# Configuration options for the deploy acceptor +# ============================================= +[deploy_acceptor] + +# The leeway allowed when considering whether a deploy is future-dated or not. +# +# To accommodate minor clock drift, deploys whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `deploys.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + # =========================================== # Configuration options for the deploy buffer # =========================================== [deploy_buffer] # The interval of checking for expired deploys. -expiry_check_interval = '1min' +expiry_check_interval = '1 minute' # ============================================== @@ -567,4 +582,4 @@ socket_umask = 0o077 [upgrade_watcher] # How often to scan file system for available upgrades. 
-upgrade_check_interval = '30sec' +upgrade_check_interval = '30 seconds' diff --git a/resources/test/valid/0_9_0/chainspec.toml b/resources/test/valid/0_9_0/chainspec.toml index dbd9fed677..61e44f17f0 100644 --- a/resources/test/valid/0_9_0/chainspec.toml +++ b/resources/test/valid/0_9_0/chainspec.toml @@ -43,6 +43,7 @@ block_gas_limit = 13 payment_args_max_length = 1024 session_args_max_length = 1024 native_transfer_minimum_motes = 2_500_000_000 +max_timestamp_leeway = '5 seconds' [wasm] max_memory = 17 diff --git a/resources/test/valid/0_9_0_unordered/chainspec.toml b/resources/test/valid/0_9_0_unordered/chainspec.toml index e7cff551e0..44fe48fc91 100644 --- a/resources/test/valid/0_9_0_unordered/chainspec.toml +++ b/resources/test/valid/0_9_0_unordered/chainspec.toml @@ -43,6 +43,7 @@ block_gas_limit = 13 payment_args_max_length = 1024 session_args_max_length = 1024 native_transfer_minimum_motes = 2_500_000_000 +max_timestamp_leeway = '5 seconds' [wasm] max_memory = 17 diff --git a/resources/test/valid/1_0_0/chainspec.toml b/resources/test/valid/1_0_0/chainspec.toml index 9f456b6cce..dbae670ab7 100644 --- a/resources/test/valid/1_0_0/chainspec.toml +++ b/resources/test/valid/1_0_0/chainspec.toml @@ -43,6 +43,7 @@ block_gas_limit = 13 payment_args_max_length = 1024 session_args_max_length = 1024 native_transfer_minimum_motes = 2_500_000_000 +max_timestamp_leeway = '5 seconds' [wasm] max_memory = 17 From 9220fd80143c6000f2ef29d184a50984e7ef391d Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Wed, 20 Sep 2023 11:50:22 +0100 Subject: [PATCH 26/41] include timestamp leeway in error variant --- node/src/components/deploy_acceptor.rs | 3 +- node/src/types/appendable_block.rs | 18 +++++++++-- node/src/types/deploy.rs | 45 ++++++++++++++++++++++++-- node/src/types/deploy/deploy_header.rs | 6 ++-- node/src/types/deploy/error.rs | 5 ++- 5 files changed, 68 insertions(+), 9 deletions(-) diff --git a/node/src/components/deploy_acceptor.rs b/node/src/components/deploy_acceptor.rs index faf2c25331..80ffdc1691 100644 --- a/node/src/components/deploy_acceptor.rs +++ b/node/src/components/deploy_acceptor.rs @@ -247,7 +247,8 @@ impl DeployAcceptor { &self.chain_name, &self.deploy_config, self.max_associated_keys, - verification_start_timestamp + self.acceptor_config.timestamp_leeway, + self.acceptor_config.timestamp_leeway, + verification_start_timestamp, ); // checks chainspec values if let Err(error) = acceptable_result { diff --git a/node/src/types/appendable_block.rs b/node/src/types/appendable_block.rs index 5b7d51b78a..0de06595a1 100644 --- a/node/src/types/appendable_block.rs +++ b/node/src/types/appendable_block.rs @@ -3,7 +3,7 @@ use std::{ fmt::{self, Display, Formatter}, }; -use casper_types::{Gas, PublicKey, Timestamp}; +use casper_types::{Gas, PublicKey, TimeDiff, Timestamp}; use datasize::DataSize; use num_traits::Zero; use thiserror::Error; @@ -13,6 +13,8 @@ use crate::types::{ DeployHashWithApprovals, }; +const NO_LEEWAY: TimeDiff = TimeDiff::from_millis(0); + #[derive(Debug, Error)] pub(crate) enum AddError { #[error("would exceed maximum transfer count per block")] @@ -96,7 +98,12 @@ impl AppendableBlock { } if footprint .header - .is_valid(&self.deploy_config, self.timestamp, transfer.deploy_hash()) + .is_valid( + &self.deploy_config, + NO_LEEWAY, + self.timestamp, + transfer.deploy_hash(), + ) .is_err() { return Err(AddError::InvalidDeploy); @@ -131,7 +138,12 @@ impl AppendableBlock { } if footprint .header - .is_valid(&self.deploy_config, self.timestamp, deploy.deploy_hash()) + 
.is_valid( + &self.deploy_config, + NO_LEEWAY, + self.timestamp, + deploy.deploy_hash(), + ) .is_err() { return Err(AddError::InvalidDeploy); diff --git a/node/src/types/deploy.rs b/node/src/types/deploy.rs index f73ce17908..efd68e2a9a 100644 --- a/node/src/types/deploy.rs +++ b/node/src/types/deploy.rs @@ -297,6 +297,7 @@ impl Deploy { chain_name: &str, config: &DeployConfig, max_associated_keys: u32, + timestamp_leeway: TimeDiff, at: Timestamp, ) -> Result<(), DeployConfigurationFailure> { self.is_valid_size(config.max_deploy_size)?; @@ -315,7 +316,7 @@ impl Deploy { }); } - header.is_valid(config, at, &self.hash)?; + header.is_valid(config, timestamp_leeway, at, &self.hash)?; if self.approvals.len() > max_associated_keys as usize { debug!( @@ -1254,6 +1255,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp, ) .expect("should be acceptable"); @@ -1284,6 +1286,7 @@ mod tests { expected_chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(expected_error) @@ -1320,6 +1323,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(expected_error) @@ -1356,6 +1360,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(expected_error) @@ -1371,6 +1376,7 @@ mod tests { let mut rng = crate::new_rng(); let chain_name = "net-1"; let deploy_config = DeployConfig::default(); + let leeway = TimeDiff::from_seconds(2); let deploy = create_deploy( &mut rng, @@ -1378,10 +1384,11 @@ mod tests { deploy_config.max_dependencies.into(), chain_name, ); - let current_timestamp = deploy.header.timestamp() - TimeDiff::from_seconds(1); + let current_timestamp = deploy.header.timestamp() - leeway - TimeDiff::from_seconds(1); let expected_error = DeployConfigurationFailure::TimestampInFuture { validation_timestamp: current_timestamp, + timestamp_leeway: leeway, got: deploy.header.timestamp(), }; @@ -1390,6 +1397,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + leeway, current_timestamp ), Err(expected_error) @@ -1400,6 +1408,31 @@ mod tests { ); } + #[test] + fn acceptable_if_timestamp_slightly_in_future() { + let mut rng = crate::new_rng(); + let chain_name = "net-1"; + let deploy_config = DeployConfig::default(); + let leeway = TimeDiff::from_seconds(2); + + let deploy = create_deploy( + &mut rng, + deploy_config.max_ttl, + deploy_config.max_dependencies.into(), + chain_name, + ); + let current_timestamp = deploy.header.timestamp() - (leeway / 2); + deploy + .is_config_compliant( + chain_name, + &deploy_config, + DEFAULT_MAX_ASSOCIATED_KEYS, + leeway, + current_timestamp, + ) + .expect("should be acceptable"); + } + #[test] fn not_acceptable_due_to_missing_payment_amount() { let mut rng = crate::new_rng(); @@ -1435,6 +1468,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(DeployConfigurationFailure::MissingPaymentAmount) @@ -1482,6 +1516,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(DeployConfigurationFailure::FailedToParsePaymentAmount) @@ -1535,6 +1570,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ), Err(expected_error) @@ -1589,6 +1625,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), 
current_timestamp ) ) @@ -1618,6 +1655,7 @@ mod tests { chain_name, &deploy_config, max_associated_keys, + TimeDiff::default(), current_timestamp ) ) @@ -1648,6 +1686,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ) ) @@ -1682,6 +1721,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ) ) @@ -1722,6 +1762,7 @@ mod tests { chain_name, &deploy_config, DEFAULT_MAX_ASSOCIATED_KEYS, + TimeDiff::default(), current_timestamp ) ) diff --git a/node/src/types/deploy/deploy_header.rs b/node/src/types/deploy/deploy_header.rs index 2a974373d3..8250668082 100644 --- a/node/src/types/deploy/deploy_header.rs +++ b/node/src/types/deploy/deploy_header.rs @@ -93,10 +93,11 @@ impl DeployHeader { } /// Returns Ok if and only if the dependencies count and TTL are within limits, and the - /// timestamp is not later than `at`. Does NOT check for expiry. + /// timestamp is not later than `at + timestamp_leeway`. Does NOT check for expiry. pub fn is_valid( &self, config: &DeployConfig, + timestamp_leeway: TimeDiff, at: Timestamp, deploy_hash: &DeployHash, ) -> Result<(), DeployConfigurationFailure> { @@ -126,10 +127,11 @@ impl DeployHeader { }); } - if self.timestamp() > at { + if self.timestamp() > at + timestamp_leeway { debug!(%deploy_hash, deploy_header = %self, %at, "deploy timestamp in the future"); return Err(DeployConfigurationFailure::TimestampInFuture { validation_timestamp: at, + timestamp_leeway, got: self.timestamp(), }); } diff --git a/node/src/types/deploy/error.rs b/node/src/types/deploy/error.rs index f95c88cc36..f70ea6d676 100644 --- a/node/src/types/deploy/error.rs +++ b/node/src/types/deploy/error.rs @@ -42,11 +42,14 @@ pub enum DeployConfigurationFailure { /// Deploy's timestamp is in the future. #[error( - "timestamp of {got} is later than node's validation timestamp of {validation_timestamp}" + "timestamp of {got} is later than node's validation timestamp of {validation_timestamp} \ + plus leeway of {timestamp_leeway}" )] TimestampInFuture { /// The node's timestamp when validating the deploy. validation_timestamp: Timestamp, + /// Any configured leeway added to `validation_timestamp`. + timestamp_leeway: TimeDiff, /// The deploy's timestamp. 
got: Timestamp, }, From bd8c43990ab294c0b8adb0b9d6e8fae6e04c2346 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Tue, 19 Sep 2023 17:05:59 +0100 Subject: [PATCH 27/41] add further logging to BlockValidator --- node/src/components/block_validator.rs | 50 ++++++++++++++++++-------- node/src/types/appendable_block.rs | 4 +++ 2 files changed, 40 insertions(+), 14 deletions(-) diff --git a/node/src/components/block_validator.rs b/node/src/components/block_validator.rs index 264b2c4b39..81cd71d3fb 100644 --- a/node/src/components/block_validator.rs +++ b/node/src/components/block_validator.rs @@ -12,7 +12,7 @@ mod keyed_counter; mod tests; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, fmt::Debug, sync::Arc, }; @@ -21,7 +21,7 @@ use datasize::DataSize; use derive_more::{Display, From}; use itertools::Itertools; use smallvec::{smallvec, SmallVec}; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use casper_types::Timestamp; @@ -186,6 +186,7 @@ where sender, responder, }) => { + debug!(?block, "validating proposed block"); if block.deploy_hashes().count() > self.chainspec.deploy_config.block_max_deploy_count as usize { @@ -211,20 +212,31 @@ where } let block_timestamp = block.timestamp(); - let state = self - .validation_states - .entry(block) - .or_insert(BlockValidationState { - appendable_block: AppendableBlock::new( - self.chainspec.deploy_config, - block_timestamp, - ), - missing_deploys: block_deploys.clone(), - responders: smallvec![], - }); + let state = match self.validation_states.entry(block) { + Entry::Occupied(entry) => { + let state = entry.into_mut(); + debug!(?state, "already validating this proposed block"); + state + } + Entry::Vacant(entry) => { + let state = BlockValidationState { + appendable_block: AppendableBlock::new( + self.chainspec.deploy_config, + block_timestamp, + ), + missing_deploys: block_deploys.clone(), + responders: smallvec![], + }; + entry.insert(state) + } + }; if state.missing_deploys.is_empty() { - // Block has already been validated successfully, early return to caller. + debug!( + block_timestamp = %state.appendable_block.timestamp(), + "no missing deploys - block validation complete" + ); + // Block has already been validated successfully or has no deploys. return responder.respond(true).ignore(); } @@ -272,6 +284,12 @@ where info!(block = ?key, %dt_hash, ?deploy_footprint, ?err, "block invalid"); invalid.push(key.clone()); } + debug!( + block_timestamp = %state.appendable_block.timestamp(), + deploy_hash = %dt_hash, + missing_deploy_count = %state.missing_deploys.len(), + "found deploy for block validation" + ); } } @@ -284,6 +302,10 @@ where if state.missing_deploys.is_empty() { // This one is done and valid. effects.extend(state.respond(true)); + debug!( + block_timestamp = %state.appendable_block.timestamp(), + "no further missing deploys - block validation complete" + ); return false; } true diff --git a/node/src/types/appendable_block.rs b/node/src/types/appendable_block.rs index 0de06595a1..ca0944d500 100644 --- a/node/src/types/appendable_block.rs +++ b/node/src/types/appendable_block.rs @@ -189,6 +189,10 @@ impl AppendableBlock { BlockPayload::new(deploys, transfers, accusations, random_bit) } + pub(crate) fn timestamp(&self) -> Timestamp { + self.timestamp + } + /// Returns `true` if the number of transfers is already the maximum allowed count, i.e. no /// more transfers can be added to this block. 
fn has_max_transfer_count(&self) -> bool { From 13672ccc0e351d78d5b829042dd1174771160031 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Wed, 20 Sep 2023 12:52:04 +0100 Subject: [PATCH 28/41] improve logging in BlockValidator --- node/src/components/block_validator.rs | 50 ++++++++++++++++---------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/node/src/components/block_validator.rs b/node/src/components/block_validator.rs index 81cd71d3fb..3950b43b9f 100644 --- a/node/src/components/block_validator.rs +++ b/node/src/components/block_validator.rs @@ -13,7 +13,7 @@ mod tests; use std::{ collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, - fmt::Debug, + fmt::{self, Debug, Display, Formatter}, sync::Arc, }; @@ -123,6 +123,18 @@ impl BlockValidationState { } } +impl Display for BlockValidationState { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "BlockValidationState({}, {} missing deploys, {} responders)", + self.appendable_block, + self.missing_deploys.len(), + self.responders.len() + ) + } +} + #[derive(DataSize, Debug)] pub(crate) struct BlockValidator { /// Chainspec loaded for deploy validation. @@ -157,7 +169,7 @@ impl BlockValidator { }) .join(", "); info!( - peer_id=?sender, %duplicates, + peer_id=%sender, %duplicates, "received invalid block containing duplicated deploys" ); } @@ -186,7 +198,7 @@ where sender, responder, }) => { - debug!(?block, "validating proposed block"); + debug!(%sender, %block, "validating proposed block"); if block.deploy_hashes().count() > self.chainspec.deploy_config.block_max_deploy_count as usize { @@ -215,7 +227,7 @@ where let state = match self.validation_states.entry(block) { Entry::Occupied(entry) => { let state = entry.into_mut(); - debug!(?state, "already validating this proposed block"); + debug!(%state, "already validating this proposed block"); state } Entry::Vacant(entry) => { @@ -281,15 +293,10 @@ where } }; if let Err(err) = add_result { - info!(block = ?key, %dt_hash, ?deploy_footprint, ?err, "block invalid"); + info!(block = %key, %dt_hash, ?deploy_footprint, %err, "block invalid"); invalid.push(key.clone()); } - debug!( - block_timestamp = %state.appendable_block.timestamp(), - deploy_hash = %dt_hash, - missing_deploy_count = %state.missing_deploys.len(), - "found deploy for block validation" - ); + debug!(deploy_hash = %dt_hash, %state, "found deploy for block validation"); } } @@ -325,7 +332,11 @@ where } // Notify everyone still waiting on it that all is lost. - info!(block = ?key, %dt_hash, "could not validate the deploy. block is invalid"); + info!( + block = %key, + %dt_hash, + "could not validate the deploy. block is invalid" + ); // This validation state contains a deploy hash we failed to fetch from all // sources, it can never succeed. effects.extend(state.respond(false)); @@ -341,7 +352,8 @@ where if state.missing_deploys.contains_key(&dt_hash) { // Notify everyone still waiting on it that all is lost. info!( - block = ?key, %dt_hash, + block = %key, + %dt_hash, "could not convert deploy to deploy type. 
block is invalid" ); // This validation state contains a failed deploy hash, it can never @@ -390,9 +402,9 @@ where }; if deploy.deploy_or_transfer_hash() != dt_hash { warn!( - deploy = ?deploy, - expected_deploy_or_transfer_hash = ?dt_hash, - actual_deploy_or_transfer_hash = ?deploy.deploy_or_transfer_hash(), + deploy = %deploy, + expected_deploy_or_transfer_hash = %dt_hash, + actual_deploy_or_transfer_hash = %deploy.deploy_or_transfer_hash(), "Deploy has incorrect transfer hash" ); return Event::CannotConvertDeploy(dt_hash); @@ -404,9 +416,9 @@ where }, Err(error) => { warn!( - deploy = ?deploy, - deploy_or_transfer_hash = ?dt_hash, - ?error, + deploy = %deploy, + deploy_or_transfer_hash = %dt_hash, + %error, "Could not convert deploy", ); Event::CannotConvertDeploy(dt_hash) From 7ae34f3954a72e0c57184b5ca082cbbf12f395fc Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Wed, 20 Sep 2023 14:50:30 +0100 Subject: [PATCH 29/41] change BlockValidator to fetch deploys rather than legacy deploys --- node/src/components/block_validator.rs | 61 ++++++++++++-------- node/src/components/block_validator/tests.rs | 13 +++-- 2 files changed, 46 insertions(+), 28 deletions(-) diff --git a/node/src/components/block_validator.rs b/node/src/components/block_validator.rs index 3950b43b9f..17bc64f81a 100644 --- a/node/src/components/block_validator.rs +++ b/node/src/components/block_validator.rs @@ -36,8 +36,9 @@ use crate::{ EffectBuilder, EffectExt, Effects, Responder, }, types::{ - appendable_block::AppendableBlock, Approval, Chainspec, Deploy, DeployFootprint, - DeployHash, DeployHashWithApprovals, DeployOrTransferHash, LegacyDeploy, NodeId, + appendable_block::AppendableBlock, Approval, ApprovalsHash, Chainspec, Deploy, + DeployFootprint, DeployHash, DeployHashWithApprovals, DeployId, DeployOrTransferHash, + NodeId, }, NodeRng, }; @@ -179,7 +180,7 @@ impl Component for BlockValidator where REv: From + From - + From> + + From> + From + Send, { @@ -223,6 +224,20 @@ where return responder.respond(false).ignore(); } + // Prepare all approvals hashes. + let mut deploy_ids = HashMap::new(); + for (dt_hash, approvals) in block_deploys.iter() { + match ApprovalsHash::compute(approvals) { + Ok(approvals_hash) => { + deploy_ids.insert(*dt_hash, approvals_hash); + } + Err(error) => { + warn!(%dt_hash, %error, "could not compute approvals hash"); + return responder.respond(false).ignore(); + } + } + } + let block_timestamp = block.timestamp(); let state = match self.validation_states.entry(block) { Entry::Occupied(entry) => { @@ -236,7 +251,7 @@ where self.chainspec.deploy_config, block_timestamp, ), - missing_deploys: block_deploys.clone(), + missing_deploys: block_deploys, responders: smallvec![], }; entry.insert(state) @@ -255,12 +270,16 @@ where // We register ourselves as someone interested in the ultimate validation result. state.responders.push(responder); - effects.extend(block_deploys.into_iter().flat_map(|(dt_hash, _)| { - // For every request, increase the number of in-flight... - self.in_flight.inc(&dt_hash.into()); - // ...then request it. - fetch_deploy(effect_builder, dt_hash, sender) - })); + effects.extend( + deploy_ids + .into_iter() + .flat_map(|(dt_hash, approvals_hash)| { + // For every request, increase the number of in-flight... + self.in_flight.inc(&dt_hash.into()); + // ...then request it. 
+ fetch_deploy(effect_builder, dt_hash, approvals_hash, sender) + }), + ); } Event::DeployFound { dt_hash, @@ -378,25 +397,21 @@ where fn fetch_deploy( effect_builder: EffectBuilder, dt_hash: DeployOrTransferHash, + approvals_hash: ApprovalsHash, sender: NodeId, ) -> Effects where - REv: From + From> + Send, + REv: From + From> + Send, { async move { - let deploy_hash: DeployHash = dt_hash.into(); + let deploy_id = DeployId::new(dt_hash.into(), approvals_hash); let deploy = match effect_builder - .fetch::(deploy_hash, sender, Box::new(EmptyValidationMetadata)) + .fetch::(deploy_id, sender, Box::new(EmptyValidationMetadata)) .await { - Ok(FetchedData::FromStorage { item }) | Ok(FetchedData::FromPeer { item, .. }) => { - Deploy::from(*item) - } - Err(fetcher_error) => { - warn!( - "Could not fetch deploy with deploy hash {}: {}", - deploy_hash, fetcher_error - ); + Ok(FetchedData::FromStorage { item }) | Ok(FetchedData::FromPeer { item, .. }) => *item, + Err(error) => { + warn!(%deploy_id, %error, "could not fetch deploy"); return Event::DeployMissing(dt_hash); } }; @@ -405,7 +420,7 @@ where deploy = %deploy, expected_deploy_or_transfer_hash = %dt_hash, actual_deploy_or_transfer_hash = %deploy.deploy_or_transfer_hash(), - "Deploy has incorrect transfer hash" + "deploy has incorrect hash" ); return Event::CannotConvertDeploy(dt_hash); } @@ -419,7 +434,7 @@ where deploy = %deploy, deploy_or_transfer_hash = %dt_hash, %error, - "Could not convert deploy", + "could not convert deploy", ); Event::CannotConvertDeploy(dt_hash) } diff --git a/node/src/components/block_validator/tests.rs b/node/src/components/block_validator/tests.rs index 51f05ace00..c9622b40a4 100644 --- a/node/src/components/block_validator/tests.rs +++ b/node/src/components/block_validator/tests.rs @@ -13,7 +13,10 @@ use derive_more::From; use itertools::Itertools; use crate::{ - components::{consensus::BlockContext, fetcher}, + components::{ + consensus::BlockContext, + fetcher::{self, FetchItem}, + }, reactor::{EventQueueHandle, QueueKind, Scheduler}, types::{BlockPayload, ChainspecRawBytes, DeployHashWithApprovals}, utils::{self, Loadable}, @@ -26,7 +29,7 @@ enum ReactorEvent { #[from] BlockValidator(Event), #[from] - Fetcher(FetcherRequest), + Fetcher(FetcherRequest), #[from] Storage(StorageRequest), } @@ -73,15 +76,15 @@ impl MockReactor { { if let Some((position, _)) = deploys_to_fetch .iter() - .find_position(|deploy| *deploy.hash() == id) + .find_position(|deploy| deploy.fetch_id() == id) { let deploy = deploys_to_fetch.remove(position); let response = FetchedData::FromPeer { - item: Box::new(LegacyDeploy::from(deploy)), + item: Box::new(deploy), peer, }; responder.respond(Ok(response)).await; - } else if deploys_to_not_fetch.remove(&id) { + } else if deploys_to_not_fetch.remove(id.deploy_hash()) { responder .respond(Err(fetcher::Error::Absent { id: Box::new(id), From 49bb21c14c5e5df0da33b11a8d6eb12fab6f3177 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Thu, 21 Sep 2023 05:40:09 +0100 Subject: [PATCH 30/41] major overhaul of BlockValidator to improve performance --- node/CHANGELOG.md | 2 + node/src/components/block_validator.rs | 631 ++++++------ node/src/components/block_validator/config.rs | 16 + node/src/components/block_validator/event.rs | 19 + .../block_validator/keyed_counter.rs | 111 --- node/src/components/block_validator/state.rs | 941 ++++++++++++++++++ node/src/components/block_validator/tests.rs | 128 +-- node/src/lib.rs | 1 + node/src/reactor/main_reactor.rs | 2 +- 
node/src/reactor/main_reactor/config.rs | 10 +- node/src/types/appendable_block.rs | 2 +- resources/local/config.toml | 12 + resources/production/config-example.toml | 12 + 13 files changed, 1372 insertions(+), 515 deletions(-) create mode 100644 node/src/components/block_validator/config.rs create mode 100644 node/src/components/block_validator/event.rs delete mode 100644 node/src/components/block_validator/keyed_counter.rs create mode 100644 node/src/components/block_validator/state.rs diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index 2ef267f486..6e1f74d5ad 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -16,9 +16,11 @@ All notable changes to this project will be documented in this file. The format ### Added * Add `deploy_acceptor` section to config with a single option `timestamp_leeway` to allow a small leeway when deciding if a deploy is future-dated. * Add `deploys.max_timestamp_leeway` chainspec option to define the upper limit for the new config option `deploy_acceptor.timestamp_leeway`. +* Add `block_validator.max_completed_entries` config option to control the number of recently validated proposed blocks to retain. ### Changed * Change the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255. +* Optimize the `BlockValidator` component to reduce the number of simultaneous fetch events created for a given proposed block. ### Fixed * Fix issue in `chain_get_block_transfers` JSON-RPC where blocks with no deploys could be reported as having `null` transfers rather than `[]`. diff --git a/node/src/components/block_validator.rs b/node/src/components/block_validator.rs index 17bc64f81a..6d59fc72c8 100644 --- a/node/src/components/block_validator.rs +++ b/node/src/components/block_validator.rs @@ -7,28 +7,23 @@ //! true if valid, but only fail if all sources have been exhausted. This is only relevant when //! calling for validation of the same proposed block multiple times at the same time. 
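In outline, the overhaul replaces the old per-deploy in-flight counter with a per-block state machine: each proposed block tracks the peers that claim to hold its deploys ("holders"), asks a single holder for all missing deploys per round, and only moves on to another holder when a fetch fails. A self-contained sketch of that control flow, with the node's types reduced to bare stand-ins (the real definitions appear in the diff below):

    use std::collections::HashMap;

    // Hypothetical stand-ins for the node's NodeId and deploy identifier types.
    type NodeId = u64;
    type DeployId = u64;

    enum MaybeStartFetching {
        /// Ask `holder` for every still-missing deploy.
        Start {
            holder: NodeId,
            missing_deploys: HashMap<DeployId, ()>,
        },
        /// A round of fetches is already in flight - do nothing.
        Ongoing,
        /// Every known holder has failed - answer the responders with `false`.
        Unable,
        /// Terminal states - the responders were already answered.
        ValidationSucceeded,
        ValidationFailed,
    }

    fn next_step(step: MaybeStartFetching) {
        match step {
            MaybeStartFetching::Start {
                holder,
                missing_deploys,
            } => println!("fetch {} deploys from peer {}", missing_deploys.len(), holder),
            MaybeStartFetching::Ongoing => {}
            MaybeStartFetching::Unable => println!("respond `false` to all waiting responders"),
            MaybeStartFetching::ValidationSucceeded | MaybeStartFetching::ValidationFailed => {}
        }
    }

    fn main() {
        let mut missing_deploys = HashMap::new();
        missing_deploys.insert(1, ());
        next_step(MaybeStartFetching::Start {
            holder: 42,
            missing_deploys,
        });
    }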
-mod keyed_counter;
+mod config;
+mod event;
+mod state;
 #[cfg(test)]
 mod tests;

-use std::{
-    collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap},
-    fmt::{self, Debug, Display, Formatter},
-    sync::Arc,
-};
+use std::{collections::HashMap, sync::Arc};

 use datasize::DataSize;
-use derive_more::{Display, From};
-use itertools::Itertools;
-use smallvec::{smallvec, SmallVec};
-use tracing::{debug, info, warn};
+use tracing::{debug, error, warn};

 use casper_types::Timestamp;

 use crate::{
     components::{
         consensus::{ClContext, ProposedBlock},
-        fetcher::{EmptyValidationMetadata, FetchedData},
+        fetcher::{self, EmptyValidationMetadata, FetchResult, FetchedData},
         Component,
     },
     effect::{
@@ -36,13 +31,14 @@ use crate::{
         EffectBuilder, EffectExt, Effects, Responder,
     },
     types::{
-        appendable_block::AppendableBlock, Approval, ApprovalsHash, Chainspec, Deploy,
-        DeployFootprint, DeployHash, DeployHashWithApprovals, DeployId, DeployOrTransferHash,
+        ApprovalsHash, Chainspec, Deploy, DeployHashWithApprovals, DeployId, DeployOrTransferHash,
         NodeId,
     },
     NodeRng,
 };
-use keyed_counter::KeyedCounter;
+pub use config::Config;
+pub(crate) use event::Event;
+use state::{AddResponderResult, BlockValidationState, MaybeStartFetching};

 const COMPONENT_NAME: &str = "block_validator";

 impl ProposedBlock<ClContext> {
     fn timestamp(&self) -> Timestamp {
         self.context().timestamp()
     }

-    fn deploy_hashes(&self) -> impl Iterator<Item = &DeployHash> + '_ {
-        self.value().deploy_hashes()
-    }
-
-    fn transfer_hashes(&self) -> impl Iterator<Item = &DeployHash> + '_ {
-        self.value().transfer_hashes()
-    }
-
-    fn deploys_and_transfers_iter(
-        &self,
-    ) -> impl Iterator<Item = (DeployOrTransferHash, BTreeSet<Approval>)> + '_ {
-        let deploys = self.value().deploys().iter().map(|dwa| {
-            (
-                DeployOrTransferHash::Deploy(*dwa.deploy_hash()),
-                dwa.approvals().clone(),
-            )
-        });
-        let transfers = self.value().transfers().iter().map(|dwa| {
-            (
-                DeployOrTransferHash::Transfer(*dwa.deploy_hash()),
-                dwa.approvals().clone(),
-            )
-        });
-        deploys.chain(transfers)
+    fn deploys(&self) -> &Vec<DeployHashWithApprovals> {
+        self.value().deploys()
     }
-}
-
-/// Block validator component event.
-#[derive(Debug, From, Display)]
-pub(crate) enum Event {
-    /// A request made of the block validator component.
-    #[from]
-    Request(BlockValidationRequest),
-
-    /// A deploy has been successfully found.
-    #[display(fmt = "{} found", dt_hash)]
-    DeployFound {
-        dt_hash: DeployOrTransferHash,
-        deploy_footprint: Box<DeployFootprint>,
-    },
-
-    /// A request to find a specific deploy, potentially from a peer, failed.
-    #[display(fmt = "{} missing", _0)]
-    DeployMissing(DeployOrTransferHash),
-
-    /// Deploy was invalid. Unable to convert to a deploy type.
-    #[display(fmt = "{} invalid", _0)]
-    CannotConvertDeploy(DeployOrTransferHash),
-}
-
-/// State of the current process of block validation.
-///
-/// Tracks whether or not there are deploys still missing and who is interested in the final result.
-#[derive(DataSize, Debug)]
-pub(crate) struct BlockValidationState {
-    /// Appendable block ensuring that the deploys satisfy the validity conditions.
-    appendable_block: AppendableBlock,
-    /// The set of approvals contains approvals from deploys that would be finalized with the
-    /// block.
-    missing_deploys: HashMap<DeployOrTransferHash, BTreeSet<Approval>>,
-    /// A list of responders that are awaiting an answer.
-    responders: SmallVec<[Responder<bool>; 2]>,
-}
-impl BlockValidationState {
-    fn respond(&mut self, value: bool) -> Effects<Event> {
-        self.responders
-            .drain(..)
- .flat_map(|responder| responder.respond(value).ignore()) - .collect() + fn transfers(&self) -> &Vec { + self.value().transfers() } } -impl Display for BlockValidationState { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "BlockValidationState({}, {} missing deploys, {} responders)", - self.appendable_block, - self.missing_deploys.len(), - self.responders.len() - ) - } +/// The return type of trying to handle a validation request as an already-existing request. +enum MaybeHandled { + /// The request is already being handled - return the wrapped effects and finish. + Handled(Effects), + /// The request is new - it still needs to be handled. + NotHandled(BlockValidationRequest), } #[derive(DataSize, Debug)] @@ -141,251 +69,293 @@ pub(crate) struct BlockValidator { /// Chainspec loaded for deploy validation. #[data_size(skip)] chainspec: Arc, + config: Config, /// State of validation of a specific block. validation_states: HashMap, BlockValidationState>, - /// Number of requests for a specific deploy hash still in flight. - in_flight: KeyedCounter, } impl BlockValidator { /// Creates a new block validator instance. - pub(crate) fn new(chainspec: Arc) -> Self { + pub(crate) fn new(chainspec: Arc, config: Config) -> Self { BlockValidator { chainspec, + config, validation_states: HashMap::new(), - in_flight: KeyedCounter::default(), - } - } - - /// Prints a log message about an invalid block with duplicated deploys. - fn log_block_with_replay(&self, sender: NodeId, block: &ProposedBlock) { - let mut deploy_counts = BTreeMap::new(); - for (dt_hash, _) in block.deploys_and_transfers_iter() { - *deploy_counts.entry(dt_hash).or_default() += 1; } - let duplicates = deploy_counts - .into_iter() - .filter_map(|(dt_hash, count): (DeployOrTransferHash, usize)| { - (count > 1).then(|| format!("{} * {}", count, dt_hash)) - }) - .join(", "); - info!( - peer_id=%sender, %duplicates, - "received invalid block containing duplicated deploys" - ); } -} -impl Component for BlockValidator -where - REv: From - + From - + From> - + From - + Send, -{ - type Event = Event; - - fn handle_event( + /// If the request is already being handled, we record the new info and return effects. If not, + /// the request is returned for processing as a new request. 
+ fn try_handle_as_existing_request( &mut self, effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - let mut effects = Effects::new(); - match event { - Event::Request(BlockValidationRequest { + request: BlockValidationRequest, + ) -> MaybeHandled + where + REv: From + From> + Send, + { + if let Some(state) = self.validation_states.get_mut(&request.block) { + let BlockValidationRequest { block, sender, responder, - }) => { - debug!(%sender, %block, "validating proposed block"); - if block.deploy_hashes().count() - > self.chainspec.deploy_config.block_max_deploy_count as usize - { - return responder.respond(false).ignore(); + } = request; + debug!(%sender, %block, "already validating proposed block"); + match state.add_responder(responder) { + AddResponderResult::Added => {} + AddResponderResult::ValidationCompleted { + responder, + response_to_send, + } => { + debug!(%response_to_send, "proposed block validation already completed"); + return MaybeHandled::Handled(responder.respond(response_to_send).ignore()); } - if block.transfer_hashes().count() - > self.chainspec.deploy_config.block_max_transfer_count as usize - { - return responder.respond(false).ignore(); - } - - let deploy_count = block.deploy_hashes().count() + block.transfer_hashes().count(); - if deploy_count == 0 { - // If there are no deploys, return early. - return responder.respond(true).ignore(); + } + state.add_holder(sender); + + let effects = match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_deploys, + } => fetch_deploys(effect_builder, holder, missing_deploys), + MaybeStartFetching::Ongoing => { + debug!("ongoing fetches while validating proposed block - noop"); + Effects::new() } - - // Collect the deploys in a map. If they are fewer now, then there was a duplicate! - let block_deploys: HashMap<_, _> = block.deploys_and_transfers_iter().collect(); - if block_deploys.len() != deploy_count { - self.log_block_with_replay(sender, &block); - return responder.respond(false).ignore(); + MaybeStartFetching::Unable => { + debug!("no new info while validating proposed block - responding `false`"); + respond(false, state.take_responders()) } - - // Prepare all approvals hashes. - let mut deploy_ids = HashMap::new(); - for (dt_hash, approvals) in block_deploys.iter() { - match ApprovalsHash::compute(approvals) { - Ok(approvals_hash) => { - deploy_ids.insert(*dt_hash, approvals_hash); - } - Err(error) => { - warn!(%dt_hash, %error, "could not compute approvals hash"); - return responder.respond(false).ignore(); - } - } + MaybeStartFetching::ValidationSucceeded | MaybeStartFetching::ValidationFailed => { + // If validation is already completed, we should have exited in the + // `AddResponderResult::ValidationCompleted` branch above. 
+ error!("proposed block validation already completed - noop"); + Effects::new() } + }; + MaybeHandled::Handled(effects) + } else { + MaybeHandled::NotHandled(request) + } + } - let block_timestamp = block.timestamp(); - let state = match self.validation_states.entry(block) { - Entry::Occupied(entry) => { - let state = entry.into_mut(); - debug!(%state, "already validating this proposed block"); - state - } - Entry::Vacant(entry) => { - let state = BlockValidationState { - appendable_block: AppendableBlock::new( - self.chainspec.deploy_config, - block_timestamp, - ), - missing_deploys: block_deploys, - responders: smallvec![], - }; - entry.insert(state) - } - }; + fn handle_new_request( + &mut self, + effect_builder: EffectBuilder, + BlockValidationRequest { + block, + sender, + responder, + }: BlockValidationRequest, + ) -> Effects + where + REv: From + From> + Send, + { + debug!(%sender, %block, "validating new proposed block"); + debug_assert!(!self.validation_states.contains_key(&block)); + let (mut state, maybe_responder) = + BlockValidationState::new(&block, sender, responder, self.chainspec.as_ref()); + let effects = match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_deploys, + } => fetch_deploys(effect_builder, holder, missing_deploys), + MaybeStartFetching::ValidationSucceeded => { + debug!("no deploys - block validation complete"); + debug_assert!(maybe_responder.is_some()); + respond(true, maybe_responder) + } + MaybeStartFetching::ValidationFailed => { + debug_assert!(maybe_responder.is_some()); + respond(false, maybe_responder) + } + MaybeStartFetching::Ongoing | MaybeStartFetching::Unable => { + // This `MaybeStartFetching` variant should never be returned here. + error!(%state, "invalid state while handling new block validation"); + debug_assert!(false, "invalid state {}", state); + respond(false, state.take_responders()) + } + }; + self.validation_states.insert(block, state); + self.purge_oldest_complete(); + effects + } - if state.missing_deploys.is_empty() { + fn purge_oldest_complete(&mut self) { + let mut completed_times: Vec<_> = self + .validation_states + .values() + .filter_map(BlockValidationState::block_timestamp_if_completed) + .collect(); + // Sort from newest (highest timestamp) to oldest. + completed_times.sort_unstable_by(|lhs, rhs| rhs.cmp(lhs)); + + // Normally we'll only need to remove a maximum of a single entry, but loop until we don't + // exceed the completed limit to cover any edge cases. + let max_completed_entries = self.config.max_completed_entries as usize; + while completed_times.len() > max_completed_entries { + self.validation_states.retain(|_block, state| { + if completed_times.len() <= max_completed_entries { + return true; + } + if state.block_timestamp_if_completed().as_ref() == completed_times.last() { debug!( - block_timestamp = %state.appendable_block.timestamp(), - "no missing deploys - block validation complete" + %state, + num_completed_remaining = (completed_times.len() - 1), + "purging completed block validation state" ); - // Block has already been validated successfully or has no deploys. - return responder.respond(true).ignore(); + let _ = completed_times.pop(); + return false; } + true + }); + } + } - // We register ourselves as someone interested in the ultimate validation result. - state.responders.push(responder); - - effects.extend( - deploy_ids - .into_iter() - .flat_map(|(dt_hash, approvals_hash)| { - // For every request, increase the number of in-flight... 
- self.in_flight.inc(&dt_hash.into()); - // ...then request it. - fetch_deploy(effect_builder, dt_hash, approvals_hash, sender) - }), - ); + fn handle_deploy_fetched( + &mut self, + effect_builder: EffectBuilder, + dt_hash: DeployOrTransferHash, + result: FetchResult, + ) -> Effects + where + REv: From + From> + Send, + { + match &result { + Ok(FetchedData::FromPeer { peer, .. }) => { + debug!(%dt_hash, %peer, "fetched deploy from peer") } - Event::DeployFound { - dt_hash, - deploy_footprint, - } => { - // We successfully found a hash. Decrease the number of outstanding requests. - self.in_flight.dec(&dt_hash.into()); - - // If a deploy is received for a given block that makes that block invalid somehow, - // mark it for removal. - let mut invalid = Vec::new(); + Ok(FetchedData::FromStorage { .. }) => debug!(%dt_hash, "fetched deploy locally"), + Err(error) => warn!(%dt_hash, %error, "could not fetch deploy"), + } + match result { + Ok(FetchedData::FromStorage { item }) | Ok(FetchedData::FromPeer { item, .. }) => { + if item.deploy_or_transfer_hash() != dt_hash { + warn!( + deploy = %item, + expected_deploy_or_transfer_hash = %dt_hash, + actual_deploy_or_transfer_hash = %item.deploy_or_transfer_hash(), + "deploy has incorrect deploy-or-transfer hash" + ); + // Hard failure - change state to Invalid. + let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&dt_hash)); + return respond(false, responders); + } + let deploy_footprint = match item.footprint() { + Ok(footprint) => footprint, + Err(error) => { + warn!( + deploy = %item, + %dt_hash, + %error, + "could not convert deploy", + ); + // Hard failure - change state to Invalid. + let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&dt_hash)); + return respond(false, responders); + } + }; - // Our first pass updates all validation states, crossing off the found deploy. - for (key, state) in self.validation_states.iter_mut() { - if let Some(approvals) = state.missing_deploys.remove(&dt_hash) { - // If the deploy is of the wrong type or would be invalid for this block, - // notify everyone still waiting on it that all is lost. - let add_result = match dt_hash { - DeployOrTransferHash::Deploy(hash) => { - state.appendable_block.add_deploy( - DeployHashWithApprovals::new(hash, approvals.clone()), - &deploy_footprint, - ) - } - DeployOrTransferHash::Transfer(hash) => { - state.appendable_block.add_transfer( - DeployHashWithApprovals::new(hash, approvals.clone()), - &deploy_footprint, - ) - } - }; - if let Err(err) = add_result { - info!(block = %key, %dt_hash, ?deploy_footprint, %err, "block invalid"); - invalid.push(key.clone()); - } - debug!(deploy_hash = %dt_hash, %state, "found deploy for block validation"); + let mut effects = Effects::new(); + for state in self.validation_states.values_mut() { + let responders = state.try_add_deploy_footprint(&dt_hash, &deploy_footprint); + if !responders.is_empty() { + let is_valid = matches!(state, BlockValidationState::Valid(_)); + effects.extend(respond(is_valid, responders)); } } - - // Now we remove all states that have finished and notify the requesters. - self.validation_states.retain(|key, state| { - if invalid.contains(key) { - effects.extend(state.respond(false)); - return false; + effects + } + Err(error) => { + match error { + fetcher::Error::Absent { peer, .. } + | fetcher::Error::Rejected { peer, .. } + | fetcher::Error::TimedOut { peer, .. 
} => { + // Soft failure - just mark the holder as failed and see if we can start + // fetching using a different holder. + let mut effects = Effects::new(); + self.validation_states.values_mut().for_each(|state| { + state.try_mark_holder_failed(&peer); + match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_deploys, + } => { + debug!( + %holder, + missing_deploys_len = missing_deploys.len(), + "fetching missing deploys from different peer" + ); + effects.extend(fetch_deploys( + effect_builder, + holder, + missing_deploys, + )) + } + MaybeStartFetching::Unable => { + debug!( + "exhausted peers while validating proposed block - \ + responding `false`" + ); + effects.extend(respond(false, state.take_responders())); + } + MaybeStartFetching::Ongoing + | MaybeStartFetching::ValidationSucceeded + | MaybeStartFetching::ValidationFailed => {} + } + }); + effects } - if state.missing_deploys.is_empty() { - // This one is done and valid. - effects.extend(state.respond(true)); - debug!( - block_timestamp = %state.appendable_block.timestamp(), - "no further missing deploys - block validation complete" - ); - return false; + fetcher::Error::CouldNotConstructGetRequest { .. } + | fetcher::Error::ValidationMetadataMismatch { .. } => { + // Hard failure - change state to Invalid. + let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&dt_hash)); + respond(false, responders) } - true - }); - } - Event::DeployMissing(dt_hash) => { - info!(%dt_hash, "request to download deploy timed out"); - // A deploy failed to fetch. If there is still hope (i.e. other outstanding - // requests), we just ignore this little accident. - if self.in_flight.dec(&dt_hash.into()) != 0 { - return Effects::new(); } - - self.validation_states.retain(|key, state| { - if !state.missing_deploys.contains_key(&dt_hash) { - return true; - } - - // Notify everyone still waiting on it that all is lost. - info!( - block = %key, - %dt_hash, - "could not validate the deploy. block is invalid" - ); - // This validation state contains a deploy hash we failed to fetch from all - // sources, it can never succeed. - effects.extend(state.respond(false)); - false - }); } - Event::CannotConvertDeploy(dt_hash) => { - // Deploy is invalid. There's no point waiting for other in-flight requests to - // finish. - self.in_flight.dec(&dt_hash.into()); + } + } +} - self.validation_states.retain(|key, state| { - if state.missing_deploys.contains_key(&dt_hash) { - // Notify everyone still waiting on it that all is lost. - info!( - block = %key, - %dt_hash, - "could not convert deploy to deploy type. block is invalid" - ); - // This validation state contains a failed deploy hash, it can never - // succeed. 
- effects.extend(state.respond(false)); - false - } else { - true +impl Component for BlockValidator +where + REv: From + + From + + From> + + From + + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::Request(request) => { + match self.try_handle_as_existing_request(effect_builder, request) { + MaybeHandled::Handled(effects) => effects, + MaybeHandled::NotHandled(request) => { + self.handle_new_request(effect_builder, request) } - }); + } + } + Event::DeployFetched { dt_hash, result } => { + self.handle_deploy_fetched(effect_builder, dt_hash, result) } } - effects } fn name(&self) -> &str { @@ -393,52 +363,31 @@ where } } -/// Returns effects that fetch the deploy and validate it. -fn fetch_deploy( +fn fetch_deploys( effect_builder: EffectBuilder, - dt_hash: DeployOrTransferHash, - approvals_hash: ApprovalsHash, - sender: NodeId, + holder: NodeId, + missing_deploys: HashMap, ) -> Effects where REv: From + From> + Send, { - async move { - let deploy_id = DeployId::new(dt_hash.into(), approvals_hash); - let deploy = match effect_builder - .fetch::(deploy_id, sender, Box::new(EmptyValidationMetadata)) - .await - { - Ok(FetchedData::FromStorage { item }) | Ok(FetchedData::FromPeer { item, .. }) => *item, - Err(error) => { - warn!(%deploy_id, %error, "could not fetch deploy"); - return Event::DeployMissing(dt_hash); - } - }; - if deploy.deploy_or_transfer_hash() != dt_hash { - warn!( - deploy = %deploy, - expected_deploy_or_transfer_hash = %dt_hash, - actual_deploy_or_transfer_hash = %deploy.deploy_or_transfer_hash(), - "deploy has incorrect hash" - ); - return Event::CannotConvertDeploy(dt_hash); - } - match deploy.footprint() { - Ok(deploy_footprint) => Event::DeployFound { - dt_hash, - deploy_footprint: Box::new(deploy_footprint), - }, - Err(error) => { - warn!( - deploy = %deploy, - deploy_or_transfer_hash = %dt_hash, - %error, - "could not convert deploy", - ); - Event::CannotConvertDeploy(dt_hash) - } - } - } - .event(std::convert::identity) + missing_deploys + .into_iter() + .flat_map(|(dt_hash, approvals_hash)| { + let deploy_id = DeployId::new(dt_hash.into(), approvals_hash); + effect_builder + .fetch::(deploy_id, holder, Box::new(EmptyValidationMetadata)) + .event(move |result| Event::DeployFetched { dt_hash, result }) + }) + .collect() +} + +fn respond( + is_valid: bool, + responders: impl IntoIterator>, +) -> Effects { + responders + .into_iter() + .flat_map(|responder| responder.respond(is_valid).ignore()) + .collect() } diff --git a/node/src/components/block_validator/config.rs b/node/src/components/block_validator/config.rs new file mode 100644 index 0000000000..2263273632 --- /dev/null +++ b/node/src/components/block_validator/config.rs @@ -0,0 +1,16 @@ +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +/// Configuration options for block validation. 
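+///
+/// An illustrative `config.toml` snippet (the value shown below is the default):
+///
+/// ```toml
+/// [block_validator]
+/// max_completed_entries = 3
+/// ```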
+#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)]
+pub struct Config {
+    pub max_completed_entries: u32,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Config {
+            max_completed_entries: 3,
+        }
+    }
+}
diff --git a/node/src/components/block_validator/event.rs b/node/src/components/block_validator/event.rs
new file mode 100644
index 0000000000..fede68ca13
--- /dev/null
+++ b/node/src/components/block_validator/event.rs
@@ -0,0 +1,19 @@
+use derive_more::{Display, From};
+
+use crate::{
+    components::fetcher::FetchResult,
+    effect::requests::BlockValidationRequest,
+    types::{Deploy, DeployOrTransferHash},
+};
+
+#[derive(Debug, From, Display)]
+pub(crate) enum Event {
+    #[from]
+    Request(BlockValidationRequest),
+
+    #[display(fmt = "{} fetched", dt_hash)]
+    DeployFetched {
+        dt_hash: DeployOrTransferHash,
+        result: FetchResult<Deploy>,
+    },
+}
diff --git a/node/src/components/block_validator/keyed_counter.rs b/node/src/components/block_validator/keyed_counter.rs
deleted file mode 100644
index 497fa51ea3..0000000000
--- a/node/src/components/block_validator/keyed_counter.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-//! Tracks positive integers for keys.
-
-use std::{collections::HashMap, hash::Hash};
-
-use datasize::DataSize;
-
-/// A key-counter.
-///
-/// Allows tracking a counter for any key `K`.
-///
-/// Any counter that is set to `0` will not use any memory.
-#[derive(DataSize, Debug)]
-pub(super) struct KeyedCounter<K>(HashMap<K, usize>);
-
-impl<K> KeyedCounter<K> {
-    /// Creates a new keyed counter.
-    fn new() -> Self {
-        KeyedCounter(Default::default())
-    }
-}
-
-impl<K> Default for KeyedCounter<K> {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl<K> KeyedCounter<K>
-where
-    K: Clone + Eq + Hash,
-{
-    /// Increases count for a specific key.
-    ///
-    /// Returns the new value.
-    pub(super) fn inc(&mut self, key: &K) -> usize {
-        match self.0.get_mut(key) {
-            None => {
-                self.0.insert(key.clone(), 1);
-                1
-            }
-            Some(value) => {
-                *value += 1;
-                *value
-            }
-        }
-    }
-
-    /// Decreases count for a specific key.
-    ///
-    /// Returns the new value.
-    ///
-    /// # Panics
-    ///
-    /// Panics if `dec` would become negative.
-    pub(super) fn dec(&mut self, key: &K) -> usize {
-        match self.0.get_mut(key) {
-            Some(value) => {
-                assert_ne!(*value, 0, "counter should never be zero in tracker");
-
-                *value -= 1;
-
-                if *value != 0 {
-                    return *value;
-                }
-            }
-            None => panic!("tried to decrease in-flight to negative value"),
-        };
-
-        assert_eq!(self.0.remove(key), Some(0));
-
-        0
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::KeyedCounter;
-
-    #[test]
-    fn can_count_up() {
-        let mut kc = KeyedCounter::new();
-        assert_eq!(kc.inc(&'a'), 1);
-        assert_eq!(kc.inc(&'b'), 1);
-        assert_eq!(kc.inc(&'a'), 2);
-    }
-
-    #[test]
-    fn can_count_down() {
-        let mut kc = KeyedCounter::new();
-        assert_eq!(kc.inc(&'a'), 1);
-        assert_eq!(kc.inc(&'b'), 1);
-        assert_eq!(kc.dec(&'a'), 0);
-        assert_eq!(kc.dec(&'b'), 0);
-    }
-
-    #[test]
-    #[should_panic(expected = "tried to decrease in-flight to negative value")]
-    fn panics_on_underflow() {
-        let mut kc = KeyedCounter::new();
-        assert_eq!(kc.inc(&'a'), 1);
-        assert_eq!(kc.dec(&'a'), 0);
-        kc.dec(&'a');
-    }
-
-    #[test]
-    #[should_panic(expected = "tried to decrease in-flight to negative value")]
-    fn panics_on_immediate_underflow() {
-        let mut kc = KeyedCounter::new();
-        kc.dec(&'a');
-    }
-}
diff --git a/node/src/components/block_validator/state.rs b/node/src/components/block_validator/state.rs
new file mode 100644
index 0000000000..f7daa4f0ff
--- /dev/null
+++ b/node/src/components/block_validator/state.rs
@@ -0,0 +1,941 @@
+use std::{
+    collections::{hash_map::Entry, BTreeSet, HashMap},
+    fmt::{self, Debug, Display, Formatter},
+    iter, mem,
+};
+
+use datasize::DataSize;
+use tracing::{debug, error, warn};
+
+use casper_types::Timestamp;
+
+#[cfg(test)]
+use crate::types::DeployHash;
+use crate::{
+    components::consensus::{ClContext, ProposedBlock},
+    effect::Responder,
+    types::{
+        appendable_block::AppendableBlock, Approval, ApprovalsHash, Chainspec, DeployFootprint,
+        DeployHashWithApprovals, DeployOrTransferHash, NodeId,
+    },
+};
+
+/// The state of a peer which claims to be a holder of the deploys.
+#[derive(Clone, Copy, Eq, PartialEq, DataSize, Debug)]
+pub(super) enum HolderState {
+    /// No fetch attempt has been made using this peer.
+    Unasked,
+    /// At least one fetch attempt has been made and no fetch attempts have failed when using this
+    /// peer.
+    Asked,
+    /// At least one fetch attempt has failed when using this peer.
+    Failed,
+}
+
+/// The return type of `BlockValidationState::add_responder`.
+pub(super) enum AddResponderResult {
+    /// The responder was added, meaning validation is still ongoing.
+    Added,
+    /// Validation is completed, so the responder should be called with the provided value.
+    ValidationCompleted {
+        responder: Responder<bool>,
+        response_to_send: bool,
+    },
+}
+
+/// The return type of `BlockValidationState::start_fetching`.
+#[derive(Eq, PartialEq, Debug)]
+pub(super) enum MaybeStartFetching {
+    /// Should start a new round of fetches.
+    Start {
+        holder: NodeId,
+        missing_deploys: HashMap<DeployOrTransferHash, ApprovalsHash>,
+    },
+    /// No new round of fetches should be started as one is already in progress.
+    Ongoing,
+    /// We still have missing deploys, but all holders have failed.
+    Unable,
+    /// Validation has succeeded already.
+    ValidationSucceeded,
+    /// Validation has failed already.
+    ValidationFailed,
+}
+
+#[derive(Clone, Eq, PartialEq, DataSize, Debug)]
+pub(super) struct ApprovalInfo {
+    approvals: BTreeSet<Approval>,
+    approvals_hash: ApprovalsHash,
+}
+
+impl ApprovalInfo {
+    fn new(approvals: BTreeSet<Approval>, approvals_hash: ApprovalsHash) -> Self {
+        ApprovalInfo {
+            approvals,
+            approvals_hash,
+        }
+    }
+}
+
+/// State of the current process of block validation.
+///
+/// Tracks whether or not there are deploys still missing and who is interested in the final
+/// result.
+#[derive(DataSize, Debug)]
+pub(super) enum BlockValidationState {
+    /// The validity is not yet decided.
+    InProgress {
+        /// Appendable block ensuring that the deploys satisfy the validity conditions.
+        appendable_block: AppendableBlock,
+        /// The deploys still missing, together with the approvals which would be finalized with
+        /// the block.
+        missing_deploys: HashMap<DeployOrTransferHash, ApprovalInfo>,
+        /// The set of peers which each claim to hold all the deploys.
+        holders: HashMap<NodeId, HolderState>,
+        /// A list of responders that are awaiting an answer.
+        responders: Vec<Responder<bool>>,
+    },
+    /// The proposed block with the given timestamp is valid.
+    Valid(Timestamp),
+    /// The proposed block with the given timestamp is invalid.
+    ///
+    /// Note that only hard failures in validation will result in this state. For soft failures,
+    /// like failing to fetch from a peer, the state will remain `InProgress`, even if there are no
+    /// more peers to ask, since more peers could be provided before this `BlockValidationState` is
+    /// purged.
+    Invalid(Timestamp),
+}
+
+impl BlockValidationState {
+    /// Returns a new `BlockValidationState`.
+    ///
+    /// If the new state is `Valid` or `Invalid`, the provided responder is also returned so it can
+    /// be actioned.
+    pub(super) fn new(
+        block: &ProposedBlock<ClContext>,
+        sender: NodeId,
+        responder: Responder<bool>,
+        chainspec: &Chainspec,
+    ) -> (Self, Option<Responder<bool>>) {
+        let deploy_count = block.deploys().len() + block.transfers().len();
+        if deploy_count == 0 {
+            let state = BlockValidationState::Valid(block.timestamp());
+            return (state, Some(responder));
+        }
+
+        if block.deploys().len() > chainspec.deploy_config.block_max_deploy_count as usize {
+            warn!("too many non-transfer deploys");
+            let state = BlockValidationState::Invalid(block.timestamp());
+            return (state, Some(responder));
+        }
+        if block.transfers().len() > chainspec.deploy_config.block_max_transfer_count as usize {
+            warn!("too many transfers");
+            let state = BlockValidationState::Invalid(block.timestamp());
+            return (state, Some(responder));
+        }
+
+        let appendable_block = AppendableBlock::new(chainspec.deploy_config, block.timestamp());
+
+        let mut missing_deploys = HashMap::new();
+        let deploys_iter = block.deploys().iter().map(|dhwa| {
+            let dt_hash = DeployOrTransferHash::Deploy(*dhwa.deploy_hash());
+            (dt_hash, dhwa.approvals().clone())
+        });
+        let transfers_iter = block.transfers().iter().map(|dhwa| {
+            let dt_hash = DeployOrTransferHash::Transfer(*dhwa.deploy_hash());
+            (dt_hash, dhwa.approvals().clone())
+        });
+        for (dt_hash, approvals) in deploys_iter.chain(transfers_iter) {
+            let approval_info = match ApprovalsHash::compute(&approvals) {
+                Ok(approvals_hash) => ApprovalInfo::new(approvals, approvals_hash),
+                Err(error) => {
+                    warn!(%dt_hash, %error, "could not compute approvals hash");
+                    let state = BlockValidationState::Invalid(block.timestamp());
+                    return (state, Some(responder));
+                }
+            };
+
+            if missing_deploys.insert(dt_hash, approval_info).is_some() {
+                warn!(%dt_hash, "duplicated deploy in proposed block");
+                let state = BlockValidationState::Invalid(block.timestamp());
+                return (state, Some(responder));
+            }
+        }
+
+        let state = BlockValidationState::InProgress {
+            appendable_block,
+            missing_deploys,
+            holders: iter::once((sender, HolderState::Unasked)).collect(),
+            responders: vec![responder],
+        };
+
+        (state, None)
+    }
+
+    /// Adds the given responder to the collection if the current state is `InProgress` and returns
+    /// `Added`.
+    ///
+    /// If the state is not `InProgress`, `ValidationCompleted` is returned with the responder and
+    /// the value which should be provided to the responder.
+    pub(super) fn add_responder(&mut self, responder: Responder<bool>) -> AddResponderResult {
+        match self {
+            BlockValidationState::InProgress { responders, .. } => {
+                responders.push(responder);
+                AddResponderResult::Added
+            }
+            BlockValidationState::Valid(_) => AddResponderResult::ValidationCompleted {
+                responder,
+                response_to_send: true,
+            },
+            BlockValidationState::Invalid(_) => AddResponderResult::ValidationCompleted {
+                responder,
+                response_to_send: false,
+            },
+        }
+    }
+
+    /// If the current state is `InProgress` and the peer isn't already known, adds the peer.
+    /// Otherwise any existing entry is left unchanged.
+    pub(super) fn add_holder(&mut self, holder: NodeId) {
+        match self {
+            BlockValidationState::InProgress {
+                appendable_block,
+                holders,
+                ..
+            } => match holders.entry(holder) {
+                Entry::Occupied(entry) => {
+                    debug!(
+                        block_timestamp = %appendable_block.timestamp(),
+                        peer = %entry.key(),
+                        "already registered peer as holder for block validation"
+                    );
+                }
+                Entry::Vacant(entry) => {
+                    entry.insert(HolderState::Unasked);
+                }
+            },
+            BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => {
+                error!(state = %self, "unexpected state when adding holder");
+            }
+        }
+    }
+
+    /// If the current state is `InProgress` and the holder is present, sets the holder's state to
+    /// `Failed`.
+    pub(super) fn try_mark_holder_failed(&mut self, holder: &NodeId) {
+        if let BlockValidationState::InProgress { holders, .. } = self {
+            if let Some(holder_state) = holders.get_mut(holder) {
+                debug_assert!(*holder_state != HolderState::Unasked);
+                *holder_state = HolderState::Failed;
+            }
+        }
+    }
+
+    /// Returns fetch info based on the current state:
+    ///   * if `InProgress` and there are no holders `Asked` (i.e. no ongoing fetches) and at least
+    ///     one `Unasked` holder, returns `Start`
+    ///   * if `InProgress` and any holder `Asked`, returns `Ongoing`
+    ///   * if `InProgress` and all holders `Failed`, returns `Unable`
+    ///   * if `Valid` or `Invalid`, returns `ValidationSucceeded` or `ValidationFailed`
+    ///     respectively
+    pub(super) fn start_fetching(&mut self) -> MaybeStartFetching {
+        match self {
+            BlockValidationState::InProgress {
+                missing_deploys,
+                holders,
+                ..
+            } => {
+                if missing_deploys.is_empty() {
+                    error!("should always have missing deploys while in state `InProgress`");
+                    debug_assert!(false, "invalid state");
+                    return MaybeStartFetching::ValidationFailed;
+                }
+                let mut unasked = None;
+                for (peer_id, holder_state) in holders.iter() {
+                    match holder_state {
+                        HolderState::Unasked => {
+                            unasked = Some(*peer_id);
+                        }
+                        HolderState::Asked => return MaybeStartFetching::Ongoing,
+                        HolderState::Failed => {}
+                    }
+                }
+
+                let holder = match unasked {
+                    Some(peer) => peer,
+                    None => return MaybeStartFetching::Unable,
+                };
+                // Mark the holder as `Asked`. Safe to `expect` as we just found the entry above.
+                *holders.get_mut(&holder).expect("must be in set") = HolderState::Asked;
+                let missing_deploys = missing_deploys
+                    .iter()
+                    .map(|(dt_hash, infos)| (*dt_hash, infos.approvals_hash))
+                    .collect();
+                MaybeStartFetching::Start {
+                    holder,
+                    missing_deploys,
+                }
+            }
+            BlockValidationState::Valid(_) => MaybeStartFetching::ValidationSucceeded,
+            BlockValidationState::Invalid(_) => MaybeStartFetching::ValidationFailed,
+        }
+    }
+
+    pub(super) fn take_responders(&mut self) -> Vec<Responder<bool>> {
+        match self {
+            BlockValidationState::InProgress { responders, .. } => mem::take(responders),
+            BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => vec![],
+        }
+    }
+
+    /// If the current state is `InProgress` and `dt_hash` is present, tries to add the footprint
+    /// to the appendable block to continue validation of the proposed block.
+    pub(super) fn try_add_deploy_footprint(
+        &mut self,
+        dt_hash: &DeployOrTransferHash,
+        footprint: &DeployFootprint,
+    ) -> Vec<Responder<bool>> {
+        let (new_state, responders) = match self {
+            BlockValidationState::InProgress {
+                appendable_block,
+                missing_deploys,
+                responders,
+                ..
+            } => {
+                let approvals_info = match missing_deploys.remove(dt_hash) {
+                    Some(info) => info,
+                    None => {
+                        // If this deploy is not present, just return.
+                        return vec![];
+                    }
+                };
+                // Try adding the footprint to the appendable block to see if the block remains
+                // valid.
+                let dhwa =
+                    DeployHashWithApprovals::new((*dt_hash).into(), approvals_info.approvals);
+                let add_result = match dt_hash {
+                    DeployOrTransferHash::Deploy(_) => appendable_block.add_deploy(dhwa, footprint),
+                    DeployOrTransferHash::Transfer(_) => {
+                        appendable_block.add_transfer(dhwa, footprint)
+                    }
+                };
+                match add_result {
+                    Ok(()) => {
+                        if !missing_deploys.is_empty() {
+                            // The appendable block is still valid, but we still have missing
+                            // deploys - nothing further to do here.
+                            debug!(
+                                block_timestamp = %appendable_block.timestamp(),
+                                missing_deploys_len = missing_deploys.len(),
+                                "still missing deploys - block validation incomplete"
+                            );
+                            return vec![];
+                        }
+                        debug!(
+                            block_timestamp = %appendable_block.timestamp(),
+                            "no further missing deploys - block validation complete"
+                        );
+                        let new_state = BlockValidationState::Valid(appendable_block.timestamp());
+                        (new_state, mem::take(responders))
+                    }
+                    Err(error) => {
+                        warn!(%dt_hash, ?footprint, %error, "block invalid");
+                        let new_state =
+                            BlockValidationState::Invalid(appendable_block.timestamp());
+                        (new_state, mem::take(responders))
+                    }
+                }
+            }
+            BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => return vec![],
+        };
+        *self = new_state;
+        responders
+    }
+
+    /// If the current state is `InProgress` and `dt_hash` is present, sets the state to `Invalid`
+    /// and returns the responders.
+    pub(super) fn try_mark_invalid(
+        &mut self,
+        dt_hash: &DeployOrTransferHash,
+    ) -> Vec<Responder<bool>> {
+        let (timestamp, responders) = match self {
+            BlockValidationState::InProgress {
+                appendable_block,
+                missing_deploys,
+                responders,
+                ..
+            } => {
+                if !missing_deploys.contains_key(dt_hash) {
+                    return vec![];
+                }
+                (appendable_block.timestamp(), mem::take(responders))
+            }
+            BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => return vec![],
+        };
+        *self = BlockValidationState::Invalid(timestamp);
+        responders
+    }
+
+    pub(super) fn block_timestamp_if_completed(&self) -> Option<Timestamp> {
+        match self {
+            BlockValidationState::InProgress { ..
} => None, + BlockValidationState::Valid(timestamp) | BlockValidationState::Invalid(timestamp) => { + Some(*timestamp) + } + } + } + + #[cfg(test)] + pub(super) fn missing_hashes(&self) -> Vec { + match self { + BlockValidationState::InProgress { + missing_deploys, .. + } => missing_deploys + .keys() + .map(|dt_hash| *dt_hash.deploy_hash()) + .collect(), + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => vec![], + } + } + + #[cfg(test)] + pub(super) fn holders_mut(&mut self) -> Option<&mut HashMap> { + match self { + BlockValidationState::InProgress { holders, .. } => Some(holders), + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => None, + } + } + + #[cfg(test)] + pub(super) fn responder_count(&self) -> usize { + match self { + BlockValidationState::InProgress { responders, .. } => responders.len(), + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => 0, + } + } + + #[cfg(test)] + pub(super) fn completed(&self) -> bool { + !matches!(self, BlockValidationState::InProgress { .. }) + } +} + +impl Display for BlockValidationState { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + BlockValidationState::InProgress { + appendable_block, + missing_deploys, + holders, + responders, + } => { + write!( + formatter, + "BlockValidationState::InProgress({}, {} missing deploys, {} holders, {} responders)", + appendable_block, + missing_deploys.len(), + holders.len(), + responders.len() + ) + } + BlockValidationState::Valid(timestamp) => { + write!(formatter, "BlockValidationState::Valid({timestamp})") + } + BlockValidationState::Invalid(timestamp) => { + write!(formatter, "BlockValidationState::Invalid({timestamp})") + } + } + } +} + +#[cfg(test)] +mod tests { + use futures::channel::oneshot; + use rand::Rng; + + use casper_types::{testing::TestRng, TimeDiff}; + + use super::{super::tests::*, *}; + use crate::{ + types::{ChainspecRawBytes, Deploy}, + utils::Loadable, + }; + + struct Fixture { + rng: TestRng, + deploys: Vec, + transfers: Vec, + chainspec: Chainspec, + } + + impl Fixture { + fn new() -> Self { + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + Fixture { + rng: TestRng::new(), + deploys: vec![], + transfers: vec![], + chainspec, + } + } + + /// Returns a new `BlockValidationState` with the specified number of deploys and transfers + /// added to any pre-existing ones in the fixture. 
+ fn new_state( + &mut self, + deploy_count: u64, + transfer_count: u64, + ) -> (BlockValidationState, Option>) { + let ttl = TimeDiff::from_seconds(10); + let deploys: Vec<_> = (0..deploy_count) + .map(|index| new_deploy(&mut self.rng, Timestamp::from(1000 + index), ttl)) + .collect(); + self.deploys.extend(deploys); + let transfers: Vec<_> = (0..transfer_count) + .map(|index| { + new_transfer( + &mut self.rng, + Timestamp::from(1000 + deploy_count + index), + ttl, + ) + }) + .collect(); + self.transfers.extend(transfers); + + let deploys_for_block = self + .deploys + .iter() + .map(DeployHashWithApprovals::from) + .collect(); + let transfers_for_block = self + .transfers + .iter() + .map(DeployHashWithApprovals::from) + .collect(); + + let proposed_block = new_proposed_block( + Timestamp::from(1000 + deploy_count + transfer_count), + deploys_for_block, + transfers_for_block, + ); + + BlockValidationState::new( + &proposed_block, + NodeId::random(&mut self.rng), + new_responder(), + &self.chainspec, + ) + } + + fn footprints(&self) -> Vec<(DeployOrTransferHash, DeployFootprint)> { + self.deploys + .iter() + .map(|deploy| { + let dt_hash = DeployOrTransferHash::Deploy(*deploy.hash()); + (dt_hash, deploy.footprint().unwrap()) + }) + .chain(self.transfers.iter().map(|transfer| { + let dt_hash = DeployOrTransferHash::Transfer(*transfer.hash()); + (dt_hash, transfer.footprint().unwrap()) + })) + .collect() + } + } + + fn new_responder() -> Responder { + let (sender, _receiver) = oneshot::channel(); + Responder::without_shutdown(sender) + } + + #[test] + fn new_state_should_be_valid_with_no_deploys() { + let mut fixture = Fixture::new(); + let (state, maybe_responder) = fixture.new_state(0, 0); + assert!(matches!(state, BlockValidationState::Valid(_))); + assert!(maybe_responder.is_some()); + } + + #[test] + fn new_state_should_be_invalid_with_too_many_deploys() { + let mut fixture = Fixture::new(); + let deploy_count = 5_u64; + fixture.chainspec.deploy_config.block_max_deploy_count = deploy_count as u32 - 1; + let (state, maybe_responder) = fixture.new_state(deploy_count, 0); + assert!(matches!(state, BlockValidationState::Invalid(_))); + assert!(maybe_responder.is_some()); + } + + #[test] + fn new_state_should_be_invalid_with_too_many_transfers() { + let mut fixture = Fixture::new(); + let transfer_count = 5_u64; + fixture.chainspec.deploy_config.block_max_transfer_count = transfer_count as u32 - 1; + let (state, maybe_responder) = fixture.new_state(0, transfer_count); + assert!(matches!(state, BlockValidationState::Invalid(_))); + assert!(maybe_responder.is_some()); + } + + #[test] + fn new_state_should_be_invalid_with_duplicated_deploy() { + let mut fixture = Fixture::new(); + + let timestamp = Timestamp::from(1000); + let transfers = + vec![new_transfer(&mut fixture.rng, timestamp, TimeDiff::from_millis(200)); 2]; + + let transfers_for_block = transfers + .iter() + .map(DeployHashWithApprovals::from) + .collect(); + let proposed_block = new_proposed_block(timestamp, vec![], transfers_for_block); + + let (state, maybe_responder) = BlockValidationState::new( + &proposed_block, + NodeId::random(&mut fixture.rng), + new_responder(), + &fixture.chainspec, + ); + + assert!(matches!(state, BlockValidationState::Invalid(_))); + assert!(maybe_responder.is_some()); + } + + #[test] + fn new_state_should_be_in_progress_with_some_deploys() { + let mut fixture = Fixture::new(); + let deploy_count = fixture.rng.gen_range(1..10); + let transfer_count = fixture.rng.gen_range(0..10); + let (state, 
maybe_responder) = fixture.new_state(deploy_count, transfer_count); + + match state { + BlockValidationState::InProgress { + missing_deploys, + holders, + responders, + .. + } => { + assert_eq!(missing_deploys.len() as u64, deploy_count + transfer_count); + assert_eq!(holders.len(), 1); + assert_eq!(holders.values().next().unwrap(), &HolderState::Unasked); + assert_eq!(responders.len(), 1); + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => { + panic!("unexpected state") + } + } + assert!(maybe_responder.is_none()); + } + + #[test] + fn should_add_responder_if_in_progress() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + assert_eq!(state.responder_count(), 1); + + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!(add_responder_result, AddResponderResult::Added)); + assert_eq!(state.responder_count(), 2); + } + + #[test] + fn should_not_add_responder_if_valid() { + let mut state = BlockValidationState::Valid(Timestamp::from(1000)); + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!( + add_responder_result, + AddResponderResult::ValidationCompleted { + response_to_send: true, + .. + } + )); + assert_eq!(state.responder_count(), 0); + } + + #[test] + fn should_not_add_responder_if_invalid() { + let mut state = BlockValidationState::Invalid(Timestamp::from(1000)); + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!( + add_responder_result, + AddResponderResult::ValidationCompleted { + response_to_send: false, + .. + } + )); + assert_eq!(state.responder_count(), 0); + } + + #[test] + fn should_add_new_holder_if_in_progress() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + assert_eq!(state.holders_mut().unwrap().len(), 1); + + let new_holder = NodeId::random(&mut fixture.rng); + state.add_holder(new_holder); + assert_eq!(state.holders_mut().unwrap().len(), 2); + assert_eq!( + state.holders_mut().unwrap().get(&new_holder), + Some(&HolderState::Unasked) + ); + } + + #[test] + fn should_not_change_holder_state() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + let (holder, holder_state) = state + .holders_mut() + .expect("should have holders") + .iter_mut() + .next() + .expect("should have one entry"); + *holder_state = HolderState::Asked; + let holder = *holder; + + state.add_holder(holder); + assert_eq!(state.holders_mut().unwrap().len(), 1); + assert_eq!( + state.holders_mut().unwrap().get(&holder), + Some(&HolderState::Asked) + ); + } + + #[test] + fn should_start_fetching() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + let (holder, holder_state) = state + .holders_mut() + .expect("should have holders") + .iter_mut() + .next() + .expect("should have one entry"); + assert_eq!(*holder_state, HolderState::Unasked); + let original_holder = *holder; + + // We currently have one unasked holder. Add some failed holders - should still return + // `MaybeStartFetching::Start` containing the original holder. 
+ for _ in 0..3 { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(&mut fixture.rng), HolderState::Failed); + } + + let maybe_start_fetching = state.start_fetching(); + match maybe_start_fetching { + MaybeStartFetching::Start { + holder, + missing_deploys, + } => { + assert_eq!(holder, original_holder); + assert_eq!(missing_deploys.len(), 4); + } + _ => panic!("unexpected return value"), + } + + // The original holder should now be marked as `Asked`. + let holder_state = state.holders_mut().unwrap().get(&original_holder); + assert_eq!(holder_state, Some(&HolderState::Asked)); + } + + #[test] + fn start_fetching_should_return_ongoing_if_any_holder_in_asked_state() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // Change the current (only) holder's state to `Asked`. + let maybe_start_fetching = state.start_fetching(); + assert!(matches!( + maybe_start_fetching, + MaybeStartFetching::Start { .. } + )); + let holder_state = state.holders_mut().unwrap().values().next(); + assert_eq!(holder_state, Some(&HolderState::Asked)); + + // Add some unasked holders and some failed - should still return + // `MaybeStartFetching::Ongoing`. + let unasked_count = fixture.rng.gen_range(0..3); + for _ in 0..unasked_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(&mut fixture.rng), HolderState::Unasked); + } + let failed_count = fixture.rng.gen_range(0..3); + for _ in 0..failed_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(&mut fixture.rng), HolderState::Failed); + } + + // Clone the holders collection before calling `start_fetching` as it should be unmodified + // by the call. + let holders_before = state.holders_mut().unwrap().clone(); + + // `start_fetching` should return `Ongoing` due to the single `Asked` holder. + let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::Ongoing); + + // The holders should be unchanged. + assert_eq!(state.holders_mut().unwrap(), &holders_before); + } + + #[test] + fn start_fetching_should_return_unable_if_all_holders_in_failed_state() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // Set the original holder's state to `Failed` and add some more failed. + *state + .holders_mut() + .expect("should have holders") + .values_mut() + .next() + .expect("should have one entry") = HolderState::Failed; + + let failed_count = fixture.rng.gen_range(0..3); + for _ in 0..failed_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(&mut fixture.rng), HolderState::Failed); + } + + // Clone the holders collection before calling `start_fetching` as it should be unmodified + // by the call. + let holders_before = state.holders_mut().unwrap().clone(); + + // `start_fetching` should return `Unable` due to no un-failed holders. + let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::Unable); + + // The holders should be unchanged. 
+ assert_eq!(state.holders_mut().unwrap(), &holders_before); + } + + #[test] + fn start_fetching_should_return_validation_succeeded_if_valid() { + let mut state = BlockValidationState::Valid(Timestamp::from(1000)); + let maybe_start_fetching = state.start_fetching(); + assert_eq!( + maybe_start_fetching, + MaybeStartFetching::ValidationSucceeded + ); + } + + #[test] + fn start_fetching_should_return_validation_failed_if_invalid() { + let mut state = BlockValidationState::Invalid(Timestamp::from(1000)); + let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::ValidationFailed); + } + + #[test] + fn state_should_change_to_validation_succeeded() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // While there is still at least one missing deploy, `try_add_deploy_footprint` should keep + // the state `InProgress` and never return responders. + let mut footprints = fixture.footprints(); + while footprints.len() > 1 { + let (dt_hash, footprint) = footprints.pop().unwrap(); + let responders = state.try_add_deploy_footprint(&dt_hash, &footprint); + assert!(responders.is_empty()); + assert!(matches!( + state, + BlockValidationState::InProgress { ref responders, .. } + if !responders.is_empty() + )); + } + + // The final deploy should cause the state to go to `Valid` and the responders to be + // returned. + let (dt_hash, footprint) = footprints.pop().unwrap(); + let responders = state.try_add_deploy_footprint(&dt_hash, &footprint); + assert_eq!(responders.len(), 1); + assert!(matches!(state, BlockValidationState::Valid(_))); + } + + #[test] + fn unrelated_deploy_added_should_not_change_state() { + let mut fixture = Fixture::new(); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + let (appendable_block_before, missing_deploys_before, holders_before) = match &state { + BlockValidationState::InProgress { + appendable_block, + missing_deploys, + holders, + .. + } => ( + appendable_block.clone(), + missing_deploys.clone(), + holders.clone(), + ), + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => { + panic!("unexpected state") + } + }; + + // Create a new, random deploy. + let deploy = new_deploy(&mut fixture.rng, 1500.into(), TimeDiff::from_seconds(1)); + let dt_hash = DeployOrTransferHash::Deploy(*deploy.hash()); + let footprint = deploy.footprint().unwrap(); + + // Ensure trying to add it doesn't change the state. + let responders = state.try_add_deploy_footprint(&dt_hash, &footprint); + assert!(responders.is_empty()); + match &state { + BlockValidationState::InProgress { + appendable_block, + missing_deploys, + holders, + .. + } => { + assert_eq!(&appendable_block_before, appendable_block); + assert_eq!(&missing_deploys_before, missing_deploys); + assert_eq!(&holders_before, holders); + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid(_) => { + panic!("unexpected state") + } + }; + } + + #[test] + fn state_should_change_to_validation_failed() { + let mut fixture = Fixture::new(); + // Add an invalid (future-dated) deploy to the fixture. + let invalid_deploy = + new_deploy(&mut fixture.rng, Timestamp::MAX, TimeDiff::from_seconds(1)); + fixture.deploys.push(invalid_deploy.clone()); + let (mut state, _maybe_responder) = fixture.new_state(2, 2); + assert!(matches!(state, BlockValidationState::InProgress { .. 
})); + + // Add some valid deploys, should keep the state `InProgress` and never return responders. + let mut footprints = fixture.footprints(); + while footprints.len() > 3 { + let (dt_hash, footprint) = footprints.pop().unwrap(); + if dt_hash.deploy_hash() == invalid_deploy.hash() { + continue; + } + let responders = state.try_add_deploy_footprint(&dt_hash, &footprint); + assert!(responders.is_empty()); + } + + // The invalid deploy should cause the state to go to `Invalid` and the responders to be + // returned. + let dt_hash = DeployOrTransferHash::Deploy(*invalid_deploy.hash()); + let footprint = invalid_deploy.footprint().unwrap(); + let responders = state.try_add_deploy_footprint(&dt_hash, &footprint); + assert_eq!(responders.len(), 1); + assert!(matches!(state, BlockValidationState::Invalid(_))); + } +} diff --git a/node/src/components/block_validator/tests.rs b/node/src/components/block_validator/tests.rs index c9622b40a4..f39f96104a 100644 --- a/node/src/components/block_validator/tests.rs +++ b/node/src/components/block_validator/tests.rs @@ -18,7 +18,7 @@ use crate::{ fetcher::{self, FetchItem}, }, reactor::{EventQueueHandle, QueueKind, Scheduler}, - types::{BlockPayload, ChainspecRawBytes, DeployHashWithApprovals}, + types::{BlockPayload, ChainspecRawBytes, DeployHash, DeployHashWithApprovals}, utils::{self, Loadable}, }; @@ -101,7 +101,7 @@ impl MockReactor { } } -fn new_proposed_block( +pub(super) fn new_proposed_block( timestamp: Timestamp, deploys: Vec, transfers: Vec, @@ -113,7 +113,7 @@ fn new_proposed_block( ProposedBlock::new(Arc::new(block_payload), block_context) } -fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy { +pub(super) fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy { let secret_key = SecretKey::random(rng); let chain_name = "chain".to_string(); let payment = ExecutableDeployItem::ModuleBytes { @@ -140,7 +140,7 @@ fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy ) } -fn new_transfer(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy { +pub(super) fn new_transfer(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy { let secret_key = SecretKey::random(rng); let chain_name = "chain".to_string(); let payment = ExecutableDeployItem::ModuleBytes { @@ -188,7 +188,7 @@ async fn validate_block( let reactor = MockReactor::new(); let effect_builder = EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); - let mut block_validator = BlockValidator::new(Arc::new(chainspec)); + let mut block_validator = BlockValidator::new(Arc::new(chainspec), Config::default()); // Pass the block to the component. This future will eventually resolve to the result, i.e. // whether the block is valid or not. @@ -199,7 +199,11 @@ async fn validate_block( let effects = block_validator.handle_event(effect_builder, rng, event); // If validity could already be determined, the effect will be the validation response. - if block_validator.validation_states.is_empty() { + if block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { assert_eq!(1, effects.len()); for effect in effects { tokio::spawn(effect).await.unwrap(); // Response. @@ -315,6 +319,7 @@ async fn transfer_deploy_mixup_and_replay() { /// Verifies that the block validator fetches from multiple peers. 
#[tokio::test] async fn should_fetch_from_multiple_peers() { + let _ = crate::logging::init(); tokio::time::timeout(Duration::from_secs(5), async move { let peer_count = 3; let mut rng = TestRng::new(); @@ -343,7 +348,7 @@ async fn should_fetch_from_multiple_peers() { let effect_builder = EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); - let mut block_validator = BlockValidator::new(Arc::new(chainspec)); + let mut block_validator = BlockValidator::new(Arc::new(chainspec), Config::default()); // Have a validation request for each one of the peers. These futures will eventually all // resolve to the same result, i.e. whether the block is valid or not. @@ -355,19 +360,20 @@ async fn should_fetch_from_multiple_peers() { .collect_vec(); let mut fetch_effects = VecDeque::new(); - for _ in 0..peer_count { + for index in 0..peer_count { let event = reactor.expect_block_validator_event().await; - fetch_effects.push_back(block_validator.handle_event(effect_builder, &mut rng, event)); + let effects = block_validator.handle_event(effect_builder, &mut rng, event); + if index == 0 { + assert_eq!(effects.len(), 6); + fetch_effects.extend(effects); + } else { + assert!(effects.is_empty()); + } } - // The effects are requests to fetch the block's deploys. There are six fetch requests per - // peer: only handle the first set of six for now. - let fetch_results = fetch_effects - .pop_front() - .unwrap() - .into_iter() - .map(tokio::spawn) - .collect_vec(); + // The effects are requests to fetch the block's deploys. There are six fetch requests, all + // using the first peer. + let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec(); // Provide the first deploy and transfer on first asking. let deploys_to_fetch = vec![deploys[0].clone(), transfers[0].clone()]; @@ -383,77 +389,85 @@ async fn should_fetch_from_multiple_peers() { .expect_fetch_deploys(deploys_to_fetch, deploys_to_not_fetch) .await; + let mut missing = vec![]; for fetch_result in fetch_results { let mut events = fetch_result.await.unwrap(); assert_eq!(1, events.len()); - // The event should be `DeployFound` or `DeployMissing`. + // The event should be `DeployFetched`. let event = events.pop().unwrap(); - // No further effect should be created at this stage as the block still cannot be - // validated and all fetching is enqueued when the initial validation requests are made. + // New fetch requests will be made using a different peer for all deploys not already + // registered as fetched. let effects = block_validator.handle_event(effect_builder, &mut rng, event); - assert!(effects.is_empty()); + if !effects.is_empty() { + assert!(missing.is_empty()); + missing = block_validator + .validation_states + .values() + .next() + .unwrap() + .missing_hashes(); + } + fetch_effects.extend(effects); } - // Handle the second set of six fetch requests now. - let fetch_results = fetch_effects - .pop_front() - .unwrap() - .into_iter() - .map(tokio::spawn) - .collect_vec(); + // Handle the second set of fetch requests now. + let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec(); - // Provide the first and second deploys and transfers on second asking. - let deploys_to_fetch = vec![ - deploys[0].clone(), - deploys[1].clone(), - transfers[0].clone(), - transfers[1].clone(), - ]; + // Provide the first and second deploys and transfers which haven't already been fetched on + // second asking. 
+ let deploys_to_fetch = vec![&deploys[0], &deploys[1], &transfers[0], &transfers[1]] + .into_iter() + .filter(|deploy| missing.contains(deploy.hash())) + .cloned() + .collect(); let deploys_to_not_fetch = vec![*deploys[2].hash(), *transfers[2].hash()] .into_iter() + .filter(|deploy_hash| missing.contains(deploy_hash)) .collect(); reactor .expect_fetch_deploys(deploys_to_fetch, deploys_to_not_fetch) .await; + missing.clear(); for fetch_result in fetch_results { let mut events = fetch_result.await.unwrap(); assert_eq!(1, events.len()); - // The event should be `DeployFound` or `DeployMissing`. + // The event should be `DeployFetched`. let event = events.pop().unwrap(); - // No further effect should be created at this stage as the block still cannot be - // validated and all fetching is enqueued when the initial validation requests are made. + // New fetch requests will be made using a different peer for all deploys not already + // registered as fetched. let effects = block_validator.handle_event(effect_builder, &mut rng, event); - assert!(effects.is_empty()); + if !effects.is_empty() { + assert!(missing.is_empty()); + missing = block_validator + .validation_states + .values() + .next() + .unwrap() + .missing_hashes(); + } + fetch_effects.extend(effects); } - // Handle the final set of six fetch requests now. - let fetch_results = fetch_effects - .pop_front() - .unwrap() - .into_iter() - .map(tokio::spawn) - .collect_vec(); + // Handle the final set of fetch requests now. + let fetch_results = fetch_effects.into_iter().map(tokio::spawn).collect_vec(); - // Provide the first and third deploys and transfers on third asking. - let deploys_to_fetch = vec![ - deploys[0].clone(), - deploys[2].clone(), - transfers[0].clone(), - transfers[2].clone(), - ]; - let deploys_to_not_fetch = vec![*deploys[1].hash(), *transfers[1].hash()] - .into_iter() + // Provide all deploys and transfers not already fetched on third asking. + let deploys_to_fetch = deploys + .iter() + .chain(transfers.iter()) + .filter(|deploy| missing.contains(deploy.hash())) + .cloned() .collect(); reactor - .expect_fetch_deploys(deploys_to_fetch, deploys_to_not_fetch) + .expect_fetch_deploys(deploys_to_fetch, HashSet::new()) .await; let mut effects = Effects::new(); for fetch_result in fetch_results { let mut events = fetch_result.await.unwrap(); assert_eq!(1, events.len()); - // The event should be `DeployFound` or `DeployMissing`. + // The event should be `DeployFetched`. let event = events.pop().unwrap(); // Once the block is deemed valid (i.e. when the final missing deploy is successfully // fetched) the effects will be three validation responses. 
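The `HolderState` and `MaybeStartFetching` machinery exercised by these tests boils down to a one-peer-at-a-time rotation: at most one holder is `Asked` per round, a failed round demotes that holder to `Failed`, and the next `Unasked` holder is tried until none remain. A minimal standalone sketch of that rotation, using plain `std` types as stand-ins for the node's `NodeId` and effect plumbing (illustrative only, not the component's real API):

use std::collections::HashMap;

// Illustrative stand-in for the node's `NodeId`.
type NodeId = u32;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum HolderState {
    Unasked,
    Asked,
    Failed,
}

#[derive(Debug, PartialEq)]
enum MaybeStartFetching {
    Start { holder: NodeId },
    Ongoing,
    Unable,
}

// Mirrors the selection logic: an `Asked` holder means a round is ongoing;
// otherwise pick any `Unasked` holder and mark it `Asked`; if none, give up.
fn start_fetching(holders: &mut HashMap<NodeId, HolderState>) -> MaybeStartFetching {
    let mut unasked = None;
    for (peer, state) in holders.iter() {
        match state {
            HolderState::Unasked => unasked = Some(*peer),
            HolderState::Asked => return MaybeStartFetching::Ongoing,
            HolderState::Failed => {}
        }
    }
    match unasked {
        Some(peer) => {
            holders.insert(peer, HolderState::Asked);
            MaybeStartFetching::Start { holder: peer }
        }
        None => MaybeStartFetching::Unable,
    }
}

fn main() {
    let mut holders: HashMap<NodeId, HolderState> =
        [(1, HolderState::Unasked), (2, HolderState::Unasked)].into();

    // First round: one peer becomes `Asked`, further calls report `Ongoing`.
    assert!(matches!(start_fetching(&mut holders), MaybeStartFetching::Start { .. }));
    assert_eq!(start_fetching(&mut holders), MaybeStartFetching::Ongoing);

    // A failed round demotes the peer and the next `Unasked` one is tried.
    let asked = *holders.iter().find(|(_, s)| **s == HolderState::Asked).unwrap().0;
    holders.insert(asked, HolderState::Failed);
    assert!(matches!(start_fetching(&mut holders), MaybeStartFetching::Start { .. }));

    // Once every holder has failed, the component reports `Unable`.
    let asked = *holders.iter().find(|(_, s)| **s == HolderState::Asked).unwrap().0;
    holders.insert(asked, HolderState::Failed);
    assert_eq!(start_fetching(&mut holders), MaybeStartFetching::Unable);
}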
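The `BlockValidationState` lifecycle itself is similarly compact: responders queue up while the state is `InProgress`, and they are all answered in one go the moment the final missing deploy is accounted for (or a hard failure flips the state to `Invalid`). A toy model of the happy path, again with stand-in types; note that the real component hands the responders back to the caller and answers them through `respond` effects rather than invoking them inline:

use std::collections::HashSet;

// Illustrative stand-in for the node's `Responder<bool>`.
type Responder = Box<dyn FnOnce(bool)>;

enum State {
    InProgress { missing: HashSet<u64>, responders: Vec<Responder> },
    Valid,
    Invalid,
}

impl State {
    // Mirrors `try_add_deploy_footprint`: removing the last missing deploy
    // completes validation and releases every queued responder at once.
    fn deploy_fetched(&mut self, hash: u64) {
        if let State::InProgress { missing, responders } = self {
            missing.remove(&hash);
            if missing.is_empty() {
                for responder in responders.drain(..) {
                    responder(true);
                }
                *self = State::Valid;
            }
        }
    }
}

fn main() {
    let mut state = State::InProgress {
        missing: HashSet::from([1, 2]),
        responders: vec![Box::new(|valid| println!("proposer told: valid = {valid}"))],
    };
    state.deploy_fetched(1); // still missing deploy 2, nothing sent yet
    state.deploy_fetched(2); // last one arrives, the responder is answered
    assert!(matches!(state, State::Valid));
}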
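Finally, the new `max_completed_entries` knob (see the `[block_validator]` config sections below) only pays off because completed `Valid`/`Invalid` states are now retained rather than dropped, so a re-proposed block can be answered without another fetch round. A sketch of that bounded retention, assuming timestamp-keyed entries and oldest-first eviction; the hunks above don't show the component's exact eviction policy, so this is an illustration only:

use std::collections::BTreeMap;

// Keep only the `max_completed_entries` newest completed validation results,
// keyed by block timestamp; older entries are evicted first.
fn prune_completed(completed: &mut BTreeMap<u64, bool>, max_completed_entries: usize) {
    while completed.len() > max_completed_entries {
        let oldest = *completed.keys().next().expect("non-empty map has a first key");
        completed.remove(&oldest);
    }
}

fn main() {
    let mut completed = BTreeMap::from([(1000, true), (1100, false), (1200, true), (1300, true)]);
    prune_completed(&mut completed, 3);
    assert_eq!(completed.len(), 3);
    assert!(!completed.contains_key(&1000)); // the oldest entry was dropped
}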
diff --git a/node/src/lib.rs b/node/src/lib.rs index c1478cb532..1307c6ee20 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -54,6 +54,7 @@ use signal_hook::{consts::TERM_SIGNALS, flag}; pub(crate) use components::{ block_accumulator::Config as BlockAccumulatorConfig, block_synchronizer::Config as BlockSynchronizerConfig, + block_validator::Config as BlockValidatorConfig, consensus::Config as ConsensusConfig, contract_runtime::Config as ContractRuntimeConfig, deploy_acceptor::Config as DeployAcceptorConfig, diff --git a/node/src/reactor/main_reactor.rs b/node/src/reactor/main_reactor.rs index d8c03a138a..c07f2aaebd 100644 --- a/node/src/reactor/main_reactor.rs +++ b/node/src/reactor/main_reactor.rs @@ -1142,7 +1142,7 @@ impl reactor::Reactor for MainReactor { validator_matrix.clone(), registry, )?; - let block_validator = BlockValidator::new(Arc::clone(&chainspec)); + let block_validator = BlockValidator::new(Arc::clone(&chainspec), config.block_validator); let upgrade_watcher = UpgradeWatcher::new(chainspec.as_ref(), config.upgrade_watcher, &root_dir)?; let deploy_acceptor = diff --git a/node/src/reactor/main_reactor/config.rs b/node/src/reactor/main_reactor/config.rs index 584571e298..f6d2f91c6c 100644 --- a/node/src/reactor/main_reactor/config.rs +++ b/node/src/reactor/main_reactor/config.rs @@ -5,10 +5,10 @@ use tracing::error; use crate::{ logging::LoggingConfig, types::{Chainspec, NodeConfig}, - BlockAccumulatorConfig, BlockSynchronizerConfig, ConsensusConfig, ContractRuntimeConfig, - DeployAcceptorConfig, DeployBufferConfig, DiagnosticsPortConfig, EventStreamServerConfig, - FetcherConfig, GossipConfig, NetworkConfig, RestServerConfig, RpcServerConfig, - SpeculativeExecConfig, StorageConfig, UpgradeWatcherConfig, + BlockAccumulatorConfig, BlockSynchronizerConfig, BlockValidatorConfig, ConsensusConfig, + ContractRuntimeConfig, DeployAcceptorConfig, DeployBufferConfig, DiagnosticsPortConfig, + EventStreamServerConfig, FetcherConfig, GossipConfig, NetworkConfig, RestServerConfig, + RpcServerConfig, SpeculativeExecConfig, StorageConfig, UpgradeWatcherConfig, }; /// Root configuration. @@ -50,6 +50,8 @@ pub struct Config { pub block_accumulator: BlockAccumulatorConfig, /// Config values for the block synchronizer. pub block_synchronizer: BlockSynchronizerConfig, + /// Config values for the block validator. + pub block_validator: BlockValidatorConfig, /// Config values for the upgrade watcher. pub upgrade_watcher: UpgradeWatcherConfig, } diff --git a/node/src/types/appendable_block.rs b/node/src/types/appendable_block.rs index ca0944d500..72da119c5b 100644 --- a/node/src/types/appendable_block.rs +++ b/node/src/types/appendable_block.rs @@ -36,7 +36,7 @@ pub(crate) enum AddError { } /// A block that is still being added to. It keeps track of and enforces block limits. -#[derive(Clone, DataSize, Debug)] +#[derive(Clone, Eq, PartialEq, DataSize, Debug)] pub(crate) struct AppendableBlock { deploy_config: DeployConfig, deploys: Vec, diff --git a/resources/local/config.toml b/resources/local/config.toml index d63b52b594..c84aae0df2 100644 --- a/resources/local/config.toml +++ b/resources/local/config.toml @@ -501,6 +501,18 @@ disconnect_dishonest_peers_interval = '10 seconds' latch_reset_interval = '5 seconds' +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. 
+# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + # ================================== # Configuration options for fetchers # ================================== diff --git a/resources/production/config-example.toml b/resources/production/config-example.toml index 093471b365..04c8ec9630 100644 --- a/resources/production/config-example.toml +++ b/resources/production/config-example.toml @@ -501,6 +501,18 @@ disconnect_dishonest_peers_interval = '10 seconds' latch_reset_interval = '5 seconds' +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + # ================================== # Configuration options for fetchers # ================================== From 3f8449135dbf7c554aac2c8671e9f4674dfa52a9 Mon Sep 17 00:00:00 2001 From: sacherjj <321623+sacherjj@users.noreply.github.com> Date: Wed, 27 Sep 2023 10:36:09 -0400 Subject: [PATCH 31/41] Bump to 1.5.3 --- Cargo.lock | 3 +- Cargo.toml | 2 +- ci/nctl_compile.sh | 2 +- node/Cargo.toml | 2 +- node/src/components/rpc_server/rpcs/docs.rs | 2 +- node/src/lib.rs | 2 +- resources/production/chainspec.toml | 4 +- resources/test/rpc_schema_hashing.json | 38 +++++++++---------- smart_contracts/contract_as/package-lock.json | 2 +- smart_contracts/contract_as/package.json | 2 +- 10 files changed, 29 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 14317b9fd0..3c2ba9ff61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -547,7 +547,7 @@ dependencies = [ [[package]] name = "casper-node" -version = "1.5.2" +version = "1.5.3" dependencies = [ "ansi_term", "anyhow", @@ -3163,7 +3163,6 @@ dependencies = [ [[package]] name = "parity-wasm" version = "0.45.0" -source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" [[package]] name = "parking_lot" diff --git a/Cargo.toml b/Cargo.toml index ec6b18c2dd..666ffdc756 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,4 +43,4 @@ codegen-units = 1 lto = true [patch.crates-io] -parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } +parity-wasm = { path = "../casper-wasm" } diff --git a/ci/nctl_compile.sh b/ci/nctl_compile.sh index ec2295549f..501b1337f4 100755 --- a/ci/nctl_compile.sh +++ b/ci/nctl_compile.sh @@ -37,4 +37,4 @@ done # NCTL Build nctl-compile -cachepot --show-stats +#cachepot --show-stats diff --git a/node/Cargo.toml b/node/Cargo.toml index c9ae5e641a..fd096e2236 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "casper-node" -version = "1.5.2" # when updating, also update 'html_root_url' in lib.rs +version = "1.5.3" # when updating, also update 'html_root_url' in lib.rs authors = ["Marc Brinkmann ", "Fraser Hutchison "] edition = "2018" description = "The Casper blockchain node" diff --git a/node/src/components/rpc_server/rpcs/docs.rs b/node/src/components/rpc_server/rpcs/docs.rs index 97820a2883..efd3791201 100644 --- a/node/src/components/rpc_server/rpcs/docs.rs +++ b/node/src/components/rpc_server/rpcs/docs.rs @@ -30,7 +30,7 @@ use super::{ 
use crate::effect::EffectBuilder; pub(crate) const DOCS_EXAMPLE_PROTOCOL_VERSION: ProtocolVersion = - ProtocolVersion::from_parts(1, 5, 2); + ProtocolVersion::from_parts(1, 5, 3); const DEFINITIONS_PATH: &str = "#/components/schemas/"; diff --git a/node/src/lib.rs b/node/src/lib.rs index 1307c6ee20..3937847641 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -8,7 +8,7 @@ //! While the [`main`](fn.main.html) function is the central entrypoint for the node application, //! its core event loop is found inside the [reactor](reactor/index.html). -#![doc(html_root_url = "https://docs.rs/casper-node/1.5.2")] +#![doc(html_root_url = "https://docs.rs/casper-node/1.5.3")] #![doc( html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", diff --git a/resources/production/chainspec.toml b/resources/production/chainspec.toml index 5847af59a8..546af3e357 100644 --- a/resources/production/chainspec.toml +++ b/resources/production/chainspec.toml @@ -1,6 +1,6 @@ [protocol] # Protocol version. -version = '1.5.2' +version = '1.5.3' # Whether we need to clear latest blocks back to the switch block just before the activation point or not. hard_reset = true # This protocol version becomes active at this point. @@ -11,7 +11,7 @@ hard_reset = true # in contract-runtime for computing genesis post-state hash. # # If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. -activation_point = 9100 +activation_point = 11000 [network] # Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by diff --git a/resources/test/rpc_schema_hashing.json b/resources/test/rpc_schema_hashing.json index 99dea9ea0f..0574e853fc 100644 --- a/resources/test/rpc_schema_hashing.json +++ b/resources/test/rpc_schema_hashing.json @@ -1,7 +1,7 @@ { "openrpc": "1.0.0-rc1", "info": { - "version": "1.5.2", + "version": "1.5.3", "title": "Client API of Casper Node", "description": "This describes the JSON-RPC 2.0 API of a node on the Casper network.", "contact": { @@ -116,7 +116,7 @@ "result": { "name": "account_put_deploy_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "deploy_hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa" } } @@ -201,7 +201,7 @@ "result": { "name": "info_get_deploy_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "deploy": { "hash": "5c9b3b099c1378aa8e4a5f07f59ff1fcdc69a83179427c7e67ae0377d94d93fa", "header": { @@ -368,7 +368,7 @@ "result": { "name": "state_get_account_info_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "account": { "account_hash": "account-hash-e94daaff79c2ab8d9c31d9c3058d7d0a0dd31204a5638dc1451fa67b2e3fb88c", "named_keys": [], @@ -464,7 +464,7 @@ "result": { "name": "state_get_dictionary_item_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "dictionary_key": "dictionary-67518854aa916c97d4e53df8570c8217ccc259da2721b692102d76acd0ee8d1f", "stored_value": { "CLValue": { @@ -572,7 +572,7 @@ "result": { "name": "query_global_state_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "block_header": { "parent_hash": "0707070707070707070707070707070707070707070707070707070707070707", "state_root_hash": 
"0808080808080808080808080808080808080808080808080808080808080808", @@ -706,7 +706,7 @@ "result": { "name": "query_balance_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "balance": "123456" } } @@ -746,7 +746,7 @@ "result": { "name": "info_get_peers_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "peers": [ { "node_id": "tls:0101..0101", @@ -881,7 +881,7 @@ "address": "127.0.0.1:54321" } ], - "api_version": "1.5.2", + "api_version": "1.5.3", "build_version": "1.0.0-xxxxxxxxx@DEBUG", "chainspec_name": "casper-example", "starting_state_root_hash": "0000000000000000000000000000000000000000000000000000000000000000", @@ -959,7 +959,7 @@ "result": { "name": "info_get_validator_changes_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "changes": [ { "public_key": "01d9bf2148748a85c89da5aad8ee0b0fc2d105fd39d41a4c796536354f0ae2900c", @@ -1008,7 +1008,7 @@ "result": { "name": "info_get_chainspec_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "chainspec_bytes": { "chainspec_bytes": "2a2a", "maybe_genesis_accounts_bytes": null, @@ -1074,7 +1074,7 @@ "result": { "name": "chain_get_block_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "block": { "hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", "header": { @@ -1202,7 +1202,7 @@ "result": { "name": "chain_get_block_transfers_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", "transfers": [ { @@ -1276,7 +1276,7 @@ "result": { "name": "chain_get_state_root_hash_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "state_root_hash": "0808080808080808080808080808080808080808080808080808080808080808" } } @@ -1365,7 +1365,7 @@ "result": { "name": "state_get_item_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "stored_value": { "CLValue": { "cl_type": "U64", @@ -1443,7 +1443,7 @@ "result": { "name": "state_get_balance_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "balance_value": "123456", "merkle_proof": "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41dd035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce94500226a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d000003000000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c1bcbcee522649d2b135fe510fe3" } @@ -1506,7 +1506,7 @@ "result": { "name": "chain_get_era_info_by_switch_block_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "era_summary": { "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", "era_id": 42, @@ -1586,7 +1586,7 @@ "result": { "name": "state_get_auction_info_example_result", 
"value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "auction_state": { "state_root_hash": "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", "block_height": 10, @@ -1668,7 +1668,7 @@ "result": { "name": "chain_get_era_summary_example_result", "value": { - "api_version": "1.5.2", + "api_version": "1.5.3", "era_summary": { "block_hash": "13c2d7a68ecdd4b74bf4393c88915c836c863fc4bf11d7f2bd930a1bbccacdcb", "era_id": 42, diff --git a/smart_contracts/contract_as/package-lock.json b/smart_contracts/contract_as/package-lock.json index f929cd63c2..01e88861f5 100644 --- a/smart_contracts/contract_as/package-lock.json +++ b/smart_contracts/contract_as/package-lock.json @@ -1,6 +1,6 @@ { "name": "casper-contract", - "version": "1.5.2", + "version": "1.5.3", "lockfileVersion": 2, "requires": true, "packages": { diff --git a/smart_contracts/contract_as/package.json b/smart_contracts/contract_as/package.json index f3a7f6ff59..808fba4571 100644 --- a/smart_contracts/contract_as/package.json +++ b/smart_contracts/contract_as/package.json @@ -1,6 +1,6 @@ { "name": "casper-contract", - "version": "1.5.2", + "version": "1.5.3", "description": "Library for developing Casper smart contracts.", "homepage": "https://docs.casperlabs.io/en/latest/dapp-dev-guide/index.html", "repository": { From 218471896df98a2942327022b5412daf0d69d7ec Mon Sep 17 00:00:00 2001 From: sacherjj <321623+sacherjj@users.noreply.github.com> Date: Wed, 27 Sep 2023 10:53:36 -0400 Subject: [PATCH 32/41] Fixing inadvertent inclusions for local build. --- Cargo.toml | 2 +- ci/nctl_compile.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 666ffdc756..ec6b18c2dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,4 +43,4 @@ codegen-units = 1 lto = true [patch.crates-io] -parity-wasm = { path = "../casper-wasm" } +parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } diff --git a/ci/nctl_compile.sh b/ci/nctl_compile.sh index 501b1337f4..ec2295549f 100755 --- a/ci/nctl_compile.sh +++ b/ci/nctl_compile.sh @@ -37,4 +37,4 @@ done # NCTL Build nctl-compile -#cachepot --show-stats +cachepot --show-stats From c6cc3bc1cd8d7061ef857e303e99ad1dacb63e4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Papierski?= Date: Thu, 12 Oct 2023 14:52:29 +0200 Subject: [PATCH 33/41] Remove migration code - emergency fix --- execution_engine/src/core/engine_state/mod.rs | 143 +----- .../src/test/system_contracts/auction/bids.rs | 486 ------------------ 2 files changed, 7 insertions(+), 622 deletions(-) diff --git a/execution_engine/src/core/engine_state/mod.rs b/execution_engine/src/core/engine_state/mod.rs index cdaf5d8d8f..11f5bcfd02 100644 --- a/execution_engine/src/core/engine_state/mod.rs +++ b/execution_engine/src/core/engine_state/mod.rs @@ -23,7 +23,7 @@ pub mod upgrade; use std::{ cell::RefCell, - collections::{btree_map::Entry, BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet}, convert::TryFrom, rc::Rc, }; @@ -40,17 +40,17 @@ use casper_types::{ contracts::NamedKeys, system::{ auction::{ - EraValidators, UnbondingPurse, WithdrawPurse, ARG_ERA_END_TIMESTAMP_MILLIS, - ARG_EVICTED_VALIDATORS, ARG_REWARD_FACTORS, ARG_VALIDATOR_PUBLIC_KEYS, - AUCTION_DELAY_KEY, ERA_ID_KEY, LOCKED_FUNDS_PERIOD_KEY, - SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + EraValidators, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_EVICTED_VALIDATORS, + ARG_REWARD_FACTORS, ARG_VALIDATOR_PUBLIC_KEYS, AUCTION_DELAY_KEY, + 
LOCKED_FUNDS_PERIOD_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, UNBONDING_DELAY_KEY,
+            VALIDATOR_SLOTS_KEY,
         },
         handle_payment,
         mint::{self, ROUND_SEIGNIORAGE_RATE_KEY},
         AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT,
     },
-    AccessRights, ApiError, BlockTime, CLValue, ContractHash, DeployHash, DeployInfo, EraId, Gas,
-    Key, KeyTag, Motes, Phase, ProtocolVersion, PublicKey, RuntimeArgs, StoredValue, URef, U512,
+    AccessRights, ApiError, BlockTime, CLValue, ContractHash, DeployHash, DeployInfo, Gas, Key,
+    KeyTag, Motes, Phase, ProtocolVersion, PublicKey, RuntimeArgs, StoredValue, URef, U512,
 };
 
 pub use self::{
@@ -421,89 +421,6 @@ where
         for (key, value) in upgrade_config.global_state_update() {
             tracking_copy.borrow_mut().write(*key, value.clone());
         }
-
-        // This is a one time data transformation which will be removed
-        // in a following upgrade.
-        // TODO: CRef={https://github.com/casper-network/casper-node/issues/2479}
-        {
-            let withdraw_keys = tracking_copy
-                .borrow_mut()
-                .get_keys(correlation_id, &KeyTag::Withdraw)
-                .map_err(|_| Error::FailedToGetWithdrawKeys)?;
-
-            let (unbonding_delay, current_era_id) = {
-                let auction_contract = tracking_copy
-                    .borrow_mut()
-                    .get_contract(correlation_id, *auction_hash)?;
-
-                let unbonding_delay_key = auction_contract.named_keys()[UNBONDING_DELAY_KEY];
-                let delay = tracking_copy
-                    .borrow_mut()
-                    .read(correlation_id, &unbonding_delay_key)
-                    .map_err(|error| error.into())?
-                    .ok_or(Error::FailedToRetrieveUnbondingDelay)?
-                    .as_cl_value()
-                    .ok_or_else(|| Error::Bytesrepr("unbonding_delay".to_string()))?
-                    .clone()
-                    .into_t::<u64>()
-                    .map_err(execution::Error::from)?;
-
-                let era_id_key = auction_contract.named_keys()[ERA_ID_KEY];
-
-                let era_id = tracking_copy
-                    .borrow_mut()
-                    .read(correlation_id, &era_id_key)
-                    .map_err(|error| error.into())?
-                    .ok_or(Error::FailedToRetrieveEraId)?
-                    .as_cl_value()
-                    .ok_or_else(|| Error::Bytesrepr("era_id".to_string()))?
-                    .clone()
-                    .into_t::<EraId>()
-                    .map_err(execution::Error::from)?;
-
-                (delay, era_id)
-            };
-
-            for key in withdraw_keys {
-                // Transform only those withdraw purses that are still to be
-                // processed in the unbonding queue.
-                let withdraw_purses = tracking_copy
-                    .borrow_mut()
-                    .read(correlation_id, &key)
-                    .map_err(|_| Error::FailedToGetWithdrawKeys)?
-                    .ok_or(Error::FailedToGetStoredWithdraws)?
-                    .as_withdraw()
-                    .ok_or(Error::FailedToGetWithdrawPurses)?
-                    .to_owned();
-
-                // Ensure that sufficient balance exists for all unbond purses that are to be
-                // migrated.
-                Self::fail_upgrade_if_withdraw_purses_lack_sufficient_balance(
-                    &withdraw_purses,
-                    &tracking_copy,
-                    correlation_id,
-                )?;
-
-                let unbonding_purses: Vec<UnbondingPurse> = withdraw_purses
-                    .into_iter()
-                    .filter_map(|purse| {
-                        if purse.era_of_creation() + unbonding_delay >= current_era_id {
-                            return Some(UnbondingPurse::from(purse));
-                        }
-                        None
-                    })
-                    .collect();
-
-                let unbonding_key = key
-                    .withdraw_to_unbond()
-                    .ok_or_else(|| Error::Bytesrepr("unbond".to_string()))?;
-
-                tracking_copy
-                    .borrow_mut()
-                    .write(unbonding_key, StoredValue::Unbonding(unbonding_purses));
-            }
-        }
-
         // We insert the new unbonding delay once the purses to be paid out have been transformed
         // based on the previous unbonding delay.
         if let Some(new_unbonding_delay) = upgrade_config.new_unbonding_delay() {
@@ -2269,52 +2186,6 @@ where
             .map_err(Into::into)?;
         maybe_proof.ok_or(Error::MissingChecksumRegistry)
     }
-
-    /// As the name suggests, used to ensure commit_upgrade fails if we lack sufficient balances.
-    fn fail_upgrade_if_withdraw_purses_lack_sufficient_balance(
-        withdraw_purses: &[WithdrawPurse],
-        tracking_copy: &Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,
-        correlation_id: CorrelationId,
-    ) -> Result<(), Error> {
-        let mut balances = BTreeMap::new();
-        for purse in withdraw_purses.iter() {
-            match balances.entry(*purse.bonding_purse()) {
-                Entry::Vacant(entry) => {
-                    entry.insert(*purse.amount());
-                }
-                Entry::Occupied(mut entry) => {
-                    let value = entry.get_mut();
-                    let new_val = value.checked_add(*purse.amount()).ok_or_else(|| {
-                        Error::Mint("overflowed a u512 during unbond migration".into())
-                    })?;
-                    *value = new_val;
-                }
-            }
-        }
-        for (unbond_purse_uref, unbond_amount) in balances {
-            let key = match tracking_copy
-                .borrow_mut()
-                .get_purse_balance_key(correlation_id, unbond_purse_uref.into())
-            {
-                Ok(key) => key,
-                Err(_) => return Err(Error::Mint("purse balance not found".into())),
-            };
-            let current_balance = tracking_copy
-                .borrow_mut()
-                .get_purse_balance(CorrelationId::new(), key)?
-                .value();
-
-            if unbond_amount > current_balance {
-                // If we don't have enough balance to migrate, the only thing we can do
-                // is to fail the upgrade.
-                error!(%current_balance, %unbond_purse_uref, %unbond_amount, "commit_upgrade failed during migration - insufficient in purse to unbond");
-                return Err(Error::Mint(
-                    "insufficient balance detected while migrating unbond purses".into(),
-                ));
-            }
-        }
-        Ok(())
-    }
 }
 
 fn log_execution_result(preamble: &'static str, result: &ExecutionResult) {
diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
index 816cae8853..34b23c5e25 100644
--- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
+++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
@@ -3306,48 +3306,6 @@ fn should_delegate_and_redelegate() {
     );
 }
 
-#[ignore]
-#[test]
-fn should_upgrade_unbonding_purses_from_rel_1_4_2() {
-    // The `lmdb_fixture::RELEASE_1_4_2` has a single withdraw key
-    // present in the unbonding queue at the upgrade point
-    let (mut builder, lmdb_fixture_state, _temp_dir) =
-        lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_2);
-
-    let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version();
-
-    let new_protocol_version = ProtocolVersion::from_parts(
-        previous_protocol_version.value().major,
-        previous_protocol_version.value().minor + 1,
-        0,
-    );
-
-    let mut upgrade_request = {
-        UpgradeRequestBuilder::new()
-            .with_current_protocol_version(previous_protocol_version)
-            .with_new_protocol_version(new_protocol_version)
-            .with_activation_point(EraId::new(1u64))
-            .build()
-    };
-
-    builder
-        .upgrade_with_upgrade_request(*builder.get_engine_state().config(), &mut upgrade_request)
-        .expect_upgrade_success();
-
-    let unbonding_purses: UnbondingPurses = builder.get_unbonds();
-    assert_eq!(unbonding_purses.len(), 1);
-
-    let unbond_list = unbonding_purses
-        .get(&NON_FOUNDER_VALIDATOR_1_ADDR)
-        .expect("should have unbonding purse for non founding validator");
-    assert_eq!(unbond_list.len(), 1);
-    assert_eq!(
-        unbond_list[0].validator_public_key(),
-        &*NON_FOUNDER_VALIDATOR_1_PK
-    );
-    assert!(unbond_list[0].new_validator().is_none())
-}
-
 #[ignore]
 #[test]
 fn should_handle_redelegation_to_inactive_validator() {
@@ -3540,450 +3498,6 @@ fn should_handle_redelegation_to_inactive_validator() {
     );
 }
 
-#[ignore]
-#[test]
-fn should_continue_auction_state_from_release_1_4_x() {
-    // The
`lmdb_fixture::RELEASE_1_4_3` has three withdraw keys - // in the unbonding queue which will each be processed - // in the three eras after the upgrade. - let (mut builder, lmdb_fixture_state, _temp_dir) = - lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_3); - - let withdraw_purses: WithdrawPurses = builder.get_withdraw_purses(); - - assert_eq!(withdraw_purses.len(), 1); - - let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); - - let new_protocol_version = ProtocolVersion::from_parts( - previous_protocol_version.value().major, - previous_protocol_version.value().minor + 1, - 0, - ); - - let mut upgrade_request = { - UpgradeRequestBuilder::new() - .with_current_protocol_version(previous_protocol_version) - .with_new_protocol_version(new_protocol_version) - .with_activation_point(EraId::new(20u64)) - .with_new_unbonding_delay(DEFAULT_UNBONDING_DELAY) - .build() - }; - - builder - .upgrade_with_upgrade_request(*builder.get_engine_state().config(), &mut upgrade_request) - .expect_upgrade_success(); - - let unbonding_purses: UnbondingPurses = builder.get_unbonds(); - assert_eq!(unbonding_purses.len(), 1); - - let unbond_list = unbonding_purses - .get(&NON_FOUNDER_VALIDATOR_1_ADDR) - .expect("should have unbonding purse for non founding validator"); - assert_eq!(unbond_list.len(), 3); - assert_eq!( - unbond_list[0].validator_public_key(), - &*NON_FOUNDER_VALIDATOR_1_PK - ); - assert!(unbond_list[0].new_validator().is_none()); - assert!(unbond_list[1].new_validator().is_none()); - assert!(unbond_list[2].new_validator().is_none()); - - let delegator_1_undelegate_purse = builder - .get_account(*BID_ACCOUNT_1_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_2_undelegate_purse = builder - .get_account(*BID_ACCOUNT_2_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_3_undelegate_purse = builder - .get_account(*DELEGATOR_1_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_1_purse_balance_pre_step = - builder.get_purse_balance(delegator_1_undelegate_purse); - - let delegator_2_purse_balance_pre_step = - builder.get_purse_balance(delegator_2_undelegate_purse); - - let delegator_3_purse_balance_pre_step = - builder.get_purse_balance(delegator_3_undelegate_purse); - - builder.advance_era( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - - let delegator_1_purse_balance_post_step = - builder.get_purse_balance(delegator_1_undelegate_purse); - - assert_eq!( - delegator_1_purse_balance_post_step, - delegator_1_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_2_purse_balance_post_step = - builder.get_purse_balance(delegator_2_undelegate_purse); - - assert_eq!( - delegator_2_purse_balance_post_step, - delegator_2_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_3_purse_balance_post_step = - builder.get_purse_balance(delegator_3_undelegate_purse); - - assert_eq!( - delegator_3_purse_balance_post_step, - delegator_3_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_4_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => *DELEGATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - builder - .exec(delegator_4_fund_request) - .expect_success() - .commit(); - - let delegator_4_validator_1_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), - }, - ) - .build(); - - builder - .exec(delegator_4_validator_1_delegate_request) - .expect_success() - .commit(); - - let delegator_4_redelegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, - CONTRACT_REDELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), - ARG_NEW_VALIDATOR => GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone() - }, - ) - .build(); - - builder - .exec(delegator_4_redelegate_request) - .expect_success() - .commit(); - - let delegator_4_purse = builder - .get_account(*DELEGATOR_2_ADDR) - .expect("must have account") - .main_purse(); - - let delegator_4_purse_balance_before = builder.get_purse_balance(delegator_4_purse); - - let actual_unbonding_delay = builder.get_unbonding_delay(); - - assert_eq!(actual_unbonding_delay, DEFAULT_UNBONDING_DELAY); - - for _ in 0..=actual_unbonding_delay { - let delegator_4_redelegate_purse_balance = builder.get_purse_balance(delegator_4_purse); - assert_eq!( - delegator_4_redelegate_purse_balance, - delegator_4_purse_balance_before - ); - - builder.advance_era( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - } - - let delegator_4_purse_balance_after = builder.get_purse_balance(delegator_4_purse); - - // redelegation will not transfer funds back to the user - // therefore the balance must remain the same - assert_eq!( - delegator_4_purse_balance_before, - delegator_4_purse_balance_after - ); - - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 3); - - let delegators = bids[&NON_FOUNDER_VALIDATOR_1_PK].delegators(); - assert_eq!(delegators.len(), 4); - let delegated_amount_1 = *delegators[&DELEGATOR_2].staked_amount(); - assert_eq!( - delegated_amount_1, - U512::from(DELEGATE_AMOUNT_1 - UNDELEGATE_AMOUNT_1 - DEFAULT_MINIMUM_DELEGATION_AMOUNT) - ); - - let delegators = bids[&GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY].delegators(); - assert_eq!(delegators.len(), 1); - let redelegated_amount_1 = *delegators[&DELEGATOR_2].staked_amount(); - assert_eq!( - redelegated_amount_1, - U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT) - ); -} - -#[ignore] -#[test] -fn should_transfer_to_main_purse_when_validator_is_no_longer_active() { - let (mut builder, lmdb_fixture_state, _temp_dir) = - lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_3); - - let withdraw_purses: WithdrawPurses = builder.get_withdraw_purses(); - - assert_eq!(withdraw_purses.len(), 1); - - let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); - - let new_protocol_version = ProtocolVersion::from_parts( - previous_protocol_version.value().major, - previous_protocol_version.value().minor + 1, - 0, - ); - - let mut upgrade_request = { - UpgradeRequestBuilder::new() - 
.with_current_protocol_version(previous_protocol_version) - .with_new_protocol_version(new_protocol_version) - .with_activation_point(EraId::new(20u64)) - .build() - }; - - builder - .upgrade_with_upgrade_request(*builder.get_engine_state().config(), &mut upgrade_request) - .expect_upgrade_success(); - - let unbonding_purses: UnbondingPurses = builder.get_unbonds(); - assert_eq!(unbonding_purses.len(), 1); - - let unbond_list = unbonding_purses - .get(&NON_FOUNDER_VALIDATOR_1_ADDR) - .expect("should have unbonding purses for non founding validator"); - assert_eq!(unbond_list.len(), 3); - assert_eq!( - unbond_list[0].validator_public_key(), - &*NON_FOUNDER_VALIDATOR_1_PK - ); - assert!(unbond_list[0].new_validator().is_none()); - assert!(unbond_list[1].new_validator().is_none()); - assert!(unbond_list[2].new_validator().is_none()); - - let delegator_1_undelegate_purse = builder - .get_account(*BID_ACCOUNT_1_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_1_purse_balance_pre_step = - builder.get_purse_balance(delegator_1_undelegate_purse); - - builder.advance_era( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - - let delegator_1_purse_balance_post_step = - builder.get_purse_balance(delegator_1_undelegate_purse); - - assert_eq!( - delegator_1_purse_balance_post_step, - delegator_1_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_2_undelegate_purse = builder - .get_account(*BID_ACCOUNT_2_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_2_purse_balance_pre_step = - builder.get_purse_balance(delegator_2_undelegate_purse); - - builder.advance_era( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - - let delegator_2_purse_balance_post_step = - builder.get_purse_balance(delegator_2_undelegate_purse); - - assert_eq!( - delegator_2_purse_balance_post_step, - delegator_2_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_3_undelegate_purse = builder - .get_account(*DELEGATOR_1_ADDR) - .expect("should have account") - .main_purse(); - - let delegator_3_purse_balance_pre_step = - builder.get_purse_balance(delegator_3_undelegate_purse); - - builder.advance_era( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - - let delegator_3_purse_balance_post_step = - builder.get_purse_balance(delegator_3_undelegate_purse); - - assert_eq!( - delegator_3_purse_balance_post_step, - delegator_3_purse_balance_pre_step + U512::from(UNDELEGATE_AMOUNT_1) - ); - - let delegator_4_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *DELEGATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - builder - .exec(delegator_4_fund_request) - .expect_success() - .commit(); - - let delegator_4_validator_1_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, - CONTRACT_DELEGATE, - runtime_args! 
{ - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), - }, - ) - .build(); - - builder - .exec(delegator_4_validator_1_delegate_request) - .expect_success() - .commit(); - - let delegator_4_redelegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, - CONTRACT_REDELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT), - ARG_VALIDATOR => GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), - ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone() - }, - ) - .build(); - - builder - .exec(delegator_4_redelegate_request) - .expect_success() - .commit(); - - let withdraw_request = ExecuteRequestBuilder::standard( - *NON_FOUNDER_VALIDATOR_1_ADDR, - CONTRACT_WITHDRAW_BID, - runtime_args! { - ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), - }, - ) - .build(); - - builder.exec(withdraw_request).expect_success().commit(); - - builder.advance_eras_by_default_auction_delay( - vec![ - RewardItem::new(NON_FOUNDER_VALIDATOR_1_PK.clone(), 1), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ], - vec![], - ); - - let delegator_4_purse = builder - .get_account(*DELEGATOR_2_ADDR) - .expect("must have account") - .main_purse(); - - let delegator_4_purse_balance_before = builder.get_purse_balance(delegator_4_purse); - - let rewards = vec![ - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY.clone(), 0), - RewardItem::new(GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY.clone(), 0), - ]; - - for _ in 0..(builder.get_unbonding_delay() - builder.get_auction_delay()) { - let delegator_4_redelegate_purse_balance = builder.get_purse_balance(delegator_4_purse); - assert_eq!( - delegator_4_redelegate_purse_balance, - delegator_4_purse_balance_before - ); - - builder.advance_era(rewards.clone(), vec![]); - } - - let delegator_4_purse_balance_after = builder.get_purse_balance(delegator_4_purse); - - let bids: Bids = builder.get_bids(); - - assert!(bids[&NON_FOUNDER_VALIDATOR_1_PK].inactive()); - - // Since we have re-delegated to an inactive validator, - // the funds should cycle back to the delegator. - assert_eq!( - delegator_4_purse_balance_before + UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT, - delegator_4_purse_balance_after - ); - - let delegators = bids[&GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY].delegators(); - assert_eq!(delegators.len(), 1); - let delegated_amount_1 = *delegators[&DELEGATOR_2].staked_amount(); - assert_eq!( - delegated_amount_1, - U512::from(DELEGATE_AMOUNT_1 - UNDELEGATE_AMOUNT_1 - DEFAULT_MINIMUM_DELEGATION_AMOUNT) - ); -} - #[ignore] #[test] fn should_enforce_minimum_delegation_amount() { From 33bb9494067a071dcf5b41a863f1fff76cc5bc66 Mon Sep 17 00:00:00 2001 From: sacherjj <321623+sacherjj@users.noreply.github.com> Date: Fri, 13 Oct 2023 15:42:54 -0400 Subject: [PATCH 34/41] Moving as to end of publish to allow other steps to complete. 
--- .drone.yml | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/.drone.yml b/.drone.yml index 3022cfe537..96d1b8d857 100644 --- a/.drone.yml +++ b/.drone.yml @@ -370,22 +370,6 @@ steps: prerelease: - true -- name: as-contract-publish - image: plugins/npm - settings: - username: - from_secret: npm_user - token: - from_secret: npm_token - email: - from_secret: npm_email - folder: - - "smart_contracts/contract_as" - fail_on_version_conflict: - - true - access: - - "public" - - name: nctl-s3-build <<: *buildenv_upload commands: @@ -415,6 +399,22 @@ steps: commands: - "./ci/publish_to_crates_io.sh" +- name: as-contract-publish + image: plugins/npm + settings: + username: + from_secret: npm_user + token: + from_secret: npm_token + email: + from_secret: npm_email + folder: + - "smart_contracts/contract_as" + fail_on_version_conflict: + - true + access: + - "public" + - name: notify image: plugins/slack settings: From 23ea203be77a1a75b54e48c3afb2431e471fa729 Mon Sep 17 00:00:00 2001 From: sacherjj <321623+sacherjj@users.noreply.github.com> Date: Fri, 13 Oct 2023 16:59:24 -0400 Subject: [PATCH 35/41] Bump types to 4.0.0 --- Cargo.lock | 18 ++++-------------- execution_engine/Cargo.toml | 2 +- .../test_support/Cargo.toml | 4 ++-- hashing/Cargo.toml | 2 +- node/Cargo.toml | 2 +- smart_contracts/contract/Cargo.toml | 2 +- types/Cargo.toml | 2 +- types/src/lib.rs | 2 +- 8 files changed, 12 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c2ba9ff61..4aebc348c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -641,7 +641,7 @@ dependencies = [ [[package]] name = "casper-types" -version = "3.0.0" +version = "4.0.0" dependencies = [ "base16", "base64 0.13.1", @@ -1711,25 +1711,14 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.1" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys 0.48.0", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "expensive-calculation" version = "0.1.0" @@ -3163,6 +3152,7 @@ dependencies = [ [[package]] name = "parity-wasm" version = "0.45.0" +source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" [[package]] name = "parking_lot" diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index 686adc1b4e..2cf7f718b5 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml @@ -15,7 +15,7 @@ anyhow = "1.0.33" base16 = "0.2.1" bincode = "1.3.1" casper-hashing = { version = "2.0.0", path = "../hashing" } -casper-types = { version = "3.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } +casper-types = { version = "4.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } casper-wasm-utils = "2.0.0" datasize = "0.2.4" either = "1.8.1" diff --git a/execution_engine_testing/test_support/Cargo.toml b/execution_engine_testing/test_support/Cargo.toml index 75427d4788..a364a7c4c7 100644 --- a/execution_engine_testing/test_support/Cargo.toml +++ 
b/execution_engine_testing/test_support/Cargo.toml @@ -13,7 +13,7 @@ license = "Apache-2.0" [dependencies] casper-execution-engine = { version = "5.0.0", path = "../../execution_engine", features = ["test-support"] } casper-hashing = { version = "2.0.0", path = "../../hashing" } -casper-types = { version = "3.0.0", path = "../../types" } +casper-types = { version = "4.0.0", path = "../../types" } humantime = "2" filesize = "0.2.0" lmdb-rkv = "0.14" @@ -27,7 +27,7 @@ toml = "0.5.6" tempfile = "3.4.0" [dev-dependencies] -casper-types = { version = "3.0.0", path = "../../types", features = ["std"] } +casper-types = { version = "4.0.0", path = "../../types", features = ["std"] } version-sync = "0.9.3" [features] diff --git a/hashing/Cargo.toml b/hashing/Cargo.toml index d638e50411..7add9a7e49 100644 --- a/hashing/Cargo.toml +++ b/hashing/Cargo.toml @@ -12,7 +12,7 @@ license = "Apache-2.0" [dependencies] blake2 = "0.9.0" base16 = "0.2.1" -casper-types = { version = "3.0.0", path = "../types", features = ["datasize", "std"] } +casper-types = { version = "4.0.0", path = "../types", features = ["datasize", "std"] } datasize = "0.2.9" hex = { version = "0.4.2", default-features = false, features = ["serde"] } hex-buffer-serde = "0.3.0" diff --git a/node/Cargo.toml b/node/Cargo.toml index fd096e2236..faa17e8e32 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -24,7 +24,7 @@ bytes = "1.0.1" casper-execution-engine = { version = "5.0.0", path = "../execution_engine" } casper-hashing = { version = "2.0.0", path = "../hashing" } casper-json-rpc = { version = "1.1.0", path = "../json_rpc" } -casper-types = { version = "3.0.0", path = "../types", features = ["datasize", "json-schema", "std"] } +casper-types = { version = "4.0.0", path = "../types", features = ["datasize", "json-schema", "std"] } datasize = { version = "0.2.11", features = ["detailed", "fake_clock-types", "futures-types", "smallvec-types"] } derive_more = "0.99.7" ed25519-dalek = { version = "1", default-features = false, features = ["rand", "serde", "u64_backend"] } diff --git a/smart_contracts/contract/Cargo.toml b/smart_contracts/contract/Cargo.toml index fc827228d6..2f3b86bc8e 100644 --- a/smart_contracts/contract/Cargo.toml +++ b/smart_contracts/contract/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/CasperLabs/casper-node/tree/master/smart_contra license = "Apache-2.0" [dependencies] -casper-types = { version = "3.0.0", path = "../../types" } +casper-types = { version = "4.0.0", path = "../../types" } hex_fmt = "0.3.0" version-sync = { version = "0.9", optional = true } wee_alloc = { version = "0.4.5", optional = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index 4fde9a9433..41ddbfa8d4 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "casper-types" -version = "3.0.0" # when updating, also update 'html_root_url' in lib.rs +version = "4.0.0" # when updating, also update 'html_root_url' in lib.rs authors = ["Fraser Hutchison "] edition = "2018" description = "Types shared by many casper crates for use on the Casper network." 
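The `# when updating, also update 'html_root_url' in lib.rs` reminder above describes exactly the kind of invariant the `version-sync` dev-dependency already used in this workspace can enforce automatically. A minimal sketch of such a check, assuming it is placed in the crate's own test suite (the module name and placement here are illustrative assumptions, not part of this patch):

    // Hypothetical test: the `version-sync` crate and this macro are real,
    // but wiring the test into casper-types is an assumption, not something
    // this patch does.
    #[cfg(test)]
    mod version_sync_checks {
        #[test]
        fn html_root_url_matches_crate_version() {
            // Fails when #![doc(html_root_url = "...")] in src/lib.rs no longer
            // matches the `version` field in Cargo.toml.
            version_sync::assert_html_root_url_updated!("src/lib.rs");
        }
    }

With a check like this in place, the lib.rs hunk that follows could not be forgotten when the manifest version changes.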
diff --git a/types/src/lib.rs b/types/src/lib.rs index 5fe27b0894..88224f3011 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -10,7 +10,7 @@ )), no_std )] -#![doc(html_root_url = "https://docs.rs/casper-types/3.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-types/4.0.0")] #![doc( html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", From b64ef36fd85627966315e178ea5096a8e32c48bc Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Mon, 16 Oct 2023 17:26:24 +0100 Subject: [PATCH 36/41] Revert "Bump types to 4.0.0" This reverts commit 23ea203be77a1a75b54e48c3afb2431e471fa729. --- Cargo.lock | 18 ++++++++++++++---- execution_engine/Cargo.toml | 2 +- .../test_support/Cargo.toml | 4 ++-- hashing/Cargo.toml | 2 +- node/Cargo.toml | 2 +- smart_contracts/contract/Cargo.toml | 2 +- types/Cargo.toml | 2 +- types/src/lib.rs | 2 +- 8 files changed, 22 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4aebc348c4..3c2ba9ff61 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -641,7 +641,7 @@ dependencies = [ [[package]] name = "casper-types" -version = "4.0.0" +version = "3.0.0" dependencies = [ "base16", "base64 0.13.1", @@ -1711,14 +1711,25 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.5" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ + "errno-dragonfly", "libc", "windows-sys 0.48.0", ] +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "expensive-calculation" version = "0.1.0" @@ -3152,7 +3163,6 @@ dependencies = [ [[package]] name = "parity-wasm" version = "0.45.0" -source = "git+https://github.com/casper-network/casper-wasm.git?branch=casper-0.45.0#49752a84f34d2f8748133cdd95e3064d1158b0af" [[package]] name = "parking_lot" diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index 2cf7f718b5..686adc1b4e 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml @@ -15,7 +15,7 @@ anyhow = "1.0.33" base16 = "0.2.1" bincode = "1.3.1" casper-hashing = { version = "2.0.0", path = "../hashing" } -casper-types = { version = "4.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } +casper-types = { version = "3.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } casper-wasm-utils = "2.0.0" datasize = "0.2.4" either = "1.8.1" diff --git a/execution_engine_testing/test_support/Cargo.toml b/execution_engine_testing/test_support/Cargo.toml index a364a7c4c7..75427d4788 100644 --- a/execution_engine_testing/test_support/Cargo.toml +++ b/execution_engine_testing/test_support/Cargo.toml @@ -13,7 +13,7 @@ license = "Apache-2.0" [dependencies] casper-execution-engine = { version = "5.0.0", path = "../../execution_engine", features = ["test-support"] } casper-hashing = { version = "2.0.0", path = "../../hashing" } -casper-types = { version = "4.0.0", path = "../../types" } +casper-types = { version = "3.0.0", path = "../../types" } 
humantime = "2" filesize = "0.2.0" lmdb-rkv = "0.14" @@ -27,7 +27,7 @@ toml = "0.5.6" tempfile = "3.4.0" [dev-dependencies] -casper-types = { version = "4.0.0", path = "../../types", features = ["std"] } +casper-types = { version = "3.0.0", path = "../../types", features = ["std"] } version-sync = "0.9.3" [features] diff --git a/hashing/Cargo.toml b/hashing/Cargo.toml index 7add9a7e49..d638e50411 100644 --- a/hashing/Cargo.toml +++ b/hashing/Cargo.toml @@ -12,7 +12,7 @@ license = "Apache-2.0" [dependencies] blake2 = "0.9.0" base16 = "0.2.1" -casper-types = { version = "4.0.0", path = "../types", features = ["datasize", "std"] } +casper-types = { version = "3.0.0", path = "../types", features = ["datasize", "std"] } datasize = "0.2.9" hex = { version = "0.4.2", default-features = false, features = ["serde"] } hex-buffer-serde = "0.3.0" diff --git a/node/Cargo.toml b/node/Cargo.toml index faa17e8e32..fd096e2236 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -24,7 +24,7 @@ bytes = "1.0.1" casper-execution-engine = { version = "5.0.0", path = "../execution_engine" } casper-hashing = { version = "2.0.0", path = "../hashing" } casper-json-rpc = { version = "1.1.0", path = "../json_rpc" } -casper-types = { version = "4.0.0", path = "../types", features = ["datasize", "json-schema", "std"] } +casper-types = { version = "3.0.0", path = "../types", features = ["datasize", "json-schema", "std"] } datasize = { version = "0.2.11", features = ["detailed", "fake_clock-types", "futures-types", "smallvec-types"] } derive_more = "0.99.7" ed25519-dalek = { version = "1", default-features = false, features = ["rand", "serde", "u64_backend"] } diff --git a/smart_contracts/contract/Cargo.toml b/smart_contracts/contract/Cargo.toml index 2f3b86bc8e..fc827228d6 100644 --- a/smart_contracts/contract/Cargo.toml +++ b/smart_contracts/contract/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/CasperLabs/casper-node/tree/master/smart_contra license = "Apache-2.0" [dependencies] -casper-types = { version = "4.0.0", path = "../../types" } +casper-types = { version = "3.0.0", path = "../../types" } hex_fmt = "0.3.0" version-sync = { version = "0.9", optional = true } wee_alloc = { version = "0.4.5", optional = true } diff --git a/types/Cargo.toml b/types/Cargo.toml index 41ddbfa8d4..4fde9a9433 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "casper-types" -version = "4.0.0" # when updating, also update 'html_root_url' in lib.rs +version = "3.0.0" # when updating, also update 'html_root_url' in lib.rs authors = ["Fraser Hutchison "] edition = "2018" description = "Types shared by many casper crates for use on the Casper network." 
diff --git a/types/src/lib.rs b/types/src/lib.rs
index 88224f3011..5fe27b0894 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -10,7 +10,7 @@
     )),
     no_std
 )]
-#![doc(html_root_url = "https://docs.rs/casper-types/4.0.0")]
+#![doc(html_root_url = "https://docs.rs/casper-types/3.0.0")]
 #![doc(
     html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png",
     html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png",

From 1e6147d4c617be8ae8059b7721b587b702a087d0 Mon Sep 17 00:00:00 2001
From: Fraser Hutchison
Date: Mon, 16 Oct 2023 18:27:17 +0100
Subject: [PATCH 37/41] remove public method from ExecutionResult

---
 node/src/components/storage.rs    | 38 ++++++++++++++++++++++++++++---
 node/src/types/deploy/metadata.rs | 19 +---------------
 types/src/execution_result.rs     | 21 -----------------
 3 files changed, 36 insertions(+), 42 deletions(-)

diff --git a/node/src/components/storage.rs b/node/src/components/storage.rs
index 01b55a9abf..dda111e275 100644
--- a/node/src/components/storage.rs
+++ b/node/src/components/storage.rs
@@ -71,7 +71,7 @@ use tracing::{debug, error, info, trace, warn};
 use casper_hashing::Digest;
 use casper_types::{
     bytesrepr::{FromBytes, ToBytes},
-    EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transfer,
+    EraId, ExecutionResult, ProtocolVersion, PublicKey, TimeDiff, Timestamp, Transfer, Transform,
 };
 
 use crate::{
@@ -1463,7 +1463,7 @@
     ) -> Result {
         let mut transfers: Vec<Transfer> = vec![];
         for (deploy_hash, execution_result) in execution_results {
-            transfers.extend(execution_result.successful_transfers());
+            transfers.extend(successful_transfers(&execution_result));
 
             let mut metadata = self
                 .get_deploy_metadata(txn, &deploy_hash)?
                 .unwrap_or_default();
@@ -2190,7 +2190,18 @@
                 .get_deploy_metadata(&mut txn, deploy_hash)?
                 .unwrap_or_default();
 
-            transfers.extend(metadata.successful_transfers(block_hash));
+            let successful_xfers = match metadata.execution_results.get(block_hash) {
+                Some(exec_result) => successful_transfers(exec_result),
+                None => {
+                    error!(
+                        execution_results = ?metadata.execution_results,
+                        %block_hash,
+                        "should have exec result"
+                    );
+                    vec![]
+                }
+            };
+            transfers.extend(successful_xfers);
         }
         txn.put_value(self.transfer_db, block_hash, &transfers, true)?;
         txn.commit()?;
@@ -3027,3 +3038,24 @@ fn initialize_deploy_metadata_db(
     info!("deploy metadata database initialized");
     Ok(())
 }
+
+/// Returns all `Transform::WriteTransfer`s from the execution effects if this is an
+/// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`.
+pub fn successful_transfers(execution_result: &ExecutionResult) -> Vec<Transfer> {
+    let effects = match execution_result {
+        ExecutionResult::Success { effect, .. } => effect,
+        ExecutionResult::Failure { .. } => return vec![],
+    };
+
+    effects
+        .transforms
+        .iter()
+        .filter_map(|transform_entry| {
+            if let Transform::WriteTransfer(transfer) = transform_entry.transform {
+                Some(transfer)
+            } else {
+                None
+            }
+        })
+        .collect()
+}
diff --git a/node/src/types/deploy/metadata.rs b/node/src/types/deploy/metadata.rs
index c3333669f9..0d15a2886a 100644
--- a/node/src/types/deploy/metadata.rs
+++ b/node/src/types/deploy/metadata.rs
@@ -1,9 +1,8 @@
 use std::collections::HashMap;
 
 use serde::{Deserialize, Serialize};
-use tracing::error;
 
-use casper_types::{ExecutionResult, Transfer};
+use casper_types::ExecutionResult;
 
 use crate::types::{BlockHash, BlockHashAndHeight};
 
@@ -18,22 +17,6 @@ pub(crate) struct Metadata {
     pub(crate) execution_results: HashMap<BlockHash, ExecutionResult>,
 }
 
-impl Metadata {
-    pub(crate) fn successful_transfers(&self, block_hash: &BlockHash) -> Vec<Transfer> {
-        match self.execution_results.get(block_hash) {
-            Some(exec_result) => exec_result.successful_transfers(),
-            None => {
-                error!(
-                    execution_results = ?self.execution_results,
-                    %block_hash,
-                    "should have exec result"
-                );
-                vec![]
-            }
-        }
-    }
-}
-
 /// Additional information describing a deploy.
 #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)]
 pub(crate) enum MetadataExt {
diff --git a/types/src/execution_result.rs b/types/src/execution_result.rs
index 2523684012..cc73d9ec91 100644
--- a/types/src/execution_result.rs
+++ b/types/src/execution_result.rs
@@ -179,27 +179,6 @@ pub enum ExecutionResult {
 }
 
 impl ExecutionResult {
-    /// Returns all `Transform::WriteTransfer`s from the execution effects if this is an
-    /// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`.
-    pub fn successful_transfers(&self) -> Vec<Transfer> {
-        let effects = match self {
-            ExecutionResult::Success { effect, .. } => effect,
-            ExecutionResult::Failure { .. } => return vec![],
-        };
-
-        effects
-            .transforms
-            .iter()
-            .filter_map(|transform_entry| {
-                if let Transform::WriteTransfer(transfer) = transform_entry.transform {
-                    Some(transfer)
-                } else {
-                    None
-                }
-            })
-            .collect()
-    }
-
     // This method is not intended to be used by third party crates.
 #[doc(hidden)]
 #[cfg(feature = "json-schema")]

From 83f4d1c28741bcd885aa6fef53a9077d7ab4599b Mon Sep 17 00:00:00 2001
From: Fraser Hutchison
Date: Mon, 16 Oct 2023 18:27:46 +0100
Subject: [PATCH 38/41] fix unused warnings

---
 .../src/test/system_contracts/auction/bids.rs | 30 +++++++------------
 1 file changed, 10 insertions(+), 20 deletions(-)

diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
index 34b23c5e25..3fa47d785b 100644
--- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
+++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs
@@ -5,13 +5,13 @@ use num_traits::{One, Zero};
 use once_cell::sync::Lazy;
 
 use casper_engine_test_support::{
-    utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, StepRequestBuilder,
-    UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE,
-    DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH,
-    DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION,
-    DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY,
-    DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, MINIMUM_ACCOUNT_CREATION_BALANCE,
-    PRODUCTION_RUN_GENESIS_REQUEST, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT,
+    utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS,
+    DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_CHAINSPEC_REGISTRY,
+    DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,
+    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE,
+    DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG,
+    MINIMUM_ACCOUNT_CREATION_BALANCE, PRODUCTION_RUN_GENESIS_REQUEST, SYSTEM_ADDR,
+    TIMESTAMP_MILLIS_INCREMENT,
 };
 use casper_execution_engine::{
     core::{
@@ -41,14 +41,14 @@ use casper_types::{
         self,
         auction::{
             self, Bids, DelegationRate, EraValidators, Error as AuctionError, UnbondingPurses,
-            ValidatorWeights, WithdrawPurses, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR,
-            ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY, ARG_VALIDATOR, ERA_ID_KEY, INITIAL_ERA_ID,
+            ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_NEW_VALIDATOR,
+            ARG_PUBLIC_KEY, ARG_VALIDATOR, ERA_ID_KEY, INITIAL_ERA_ID,
         },
     },
     EraId, Motes, ProtocolVersion, PublicKey, RuntimeArgs, SecretKey, U256, U512,
 };
 
-use crate::{lmdb_fixture, test::system_contracts::auction::bids::engine_state::ExecConfig};
+use crate::test::system_contracts::auction::bids::engine_state::ExecConfig;
 
 const ARG_TARGET: &str = "target";
 
@@ -107,16 +107,6 @@ static ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| AccountHash::from(&*ACCO
 const ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE;
 const ACCOUNT_2_BOND: u64 = 200_000;
 
-static GENESIS_VALIDATOR_ACCOUNT_1_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {
-    let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap();
-    PublicKey::from(&secret_key)
-});
-
-static GENESIS_VALIDATOR_ACCOUNT_2_PUBLIC_KEY: Lazy<PublicKey> = Lazy::new(|| {
-    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();
-    PublicKey::from(&secret_key)
-});
-
 static BID_ACCOUNT_1_PK: Lazy<PublicKey> = Lazy::new(|| {
     let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();
     PublicKey::from(&secret_key)

From
6b1da9034399fe3835598fdea390597167faed97 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Mon, 16 Oct 2023 19:07:43 +0100 Subject: [PATCH 39/41] update warp --- Cargo.lock | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c2ba9ff61..eccc38bdbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1199,6 +1199,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "data-encoding" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" + [[package]] name = "datasize" version = "0.2.14" @@ -4712,9 +4718,9 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", @@ -5010,13 +5016,13 @@ checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "tungstenite" -version = "0.18.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ - "base64 0.13.1", "byteorder", "bytes", + "data-encoding", "http", "httparse", "log", From b60674836bc9c62c07d011717fbac701bcc042b6 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Tue, 17 Oct 2023 11:09:10 +0100 Subject: [PATCH 40/41] replace parity crates with casper forks --- Cargo.lock | 92 ++++++++++--------- Cargo.toml | 3 - execution_engine/Cargo.toml | 8 +- .../src/core/engine_state/error.rs | 2 +- execution_engine/src/core/execution/error.rs | 8 +- .../src/core/resolvers/memory_resolver.rs | 2 +- execution_engine/src/core/resolvers/mod.rs | 2 +- .../src/core/resolvers/v1_resolver.rs | 2 +- execution_engine/src/core/runtime/args.rs | 2 +- .../src/core/runtime/externals.rs | 2 +- execution_engine/src/core/runtime/mod.rs | 10 +- execution_engine/src/core/runtime/utils.rs | 6 +- execution_engine/src/shared/opcode_costs.rs | 2 +- execution_engine/src/shared/wasm_prep.rs | 20 ++-- execution_engine/src/storage/error/lmdb.rs | 2 +- execution_engine_testing/tests/Cargo.toml | 2 +- .../tests/src/test/gas_counter.rs | 4 +- .../tests/src/test/regression/ee_1129.rs | 4 +- .../tests/src/test/regression/ee_890.rs | 4 +- .../tests/src/test/regression/ee_966.rs | 4 +- .../test/regression/regression_20210924.rs | 4 +- .../test/regression/regression_20220727.rs | 4 +- .../tests/src/test/regression/test_utils.rs | 6 +- .../tests/src/wasm_utils.rs | 4 +- 24 files changed, 99 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eccc38bdbe..b36b623d00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -432,6 +432,7 @@ dependencies = [ "casper-execution-engine", "casper-hashing", "casper-types", + "casper-wasm", "clap 2.34.0", "criterion", "dictionary", @@ -444,7 +445,6 @@ dependencies = [ "num-rational", "num-traits", "once_cell", - "parity-wasm", "rand 0.8.5", "regex", "serde", @@ -465,7 +465,9 @@ dependencies = [ "bincode", "casper-hashing", "casper-types", + "casper-wasm", "casper-wasm-utils", + "casper-wasmi", "criterion", "datasize", "either", @@ -484,7 +486,6 @@ dependencies = [ "num-traits", "num_cpus", "once_cell", - "parity-wasm", "proptest", 
"rand 0.8.5", "rand_chacha 0.3.1", @@ -499,7 +500,6 @@ dependencies = [ "uint", "uuid", "walrus", - "wasmi", ] [[package]] @@ -708,15 +708,54 @@ dependencies = [ "thiserror", ] +[[package]] +name = "casper-wasm" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f53c4e789fbff66ead0ea44030b1af2fc3c465201973483528e479a9155f98" + [[package]] name = "casper-wasm-utils" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49e4ef1382d48c312809fe8f09d0c7beb434a74f5026c5f12efe384df51ca42" +checksum = "15d9f1a2269d52961812862f67d209ef29742d06b47634e2982a96e80d0fe2b4" dependencies = [ "byteorder", + "casper-wasm", "log", - "parity-wasm", +] + +[[package]] +name = "casper-wasmi" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8357f19a7fd98073d8fe8df60f1bef1e677b7c623c1e6e2e07d2f8e59ceb87fc" +dependencies = [ + "casper-wasm", + "casper-wasmi-core", + "casper-wasmi-validation", +] + +[[package]] +name = "casper-wasmi-core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60089625560924f184cf91d59b0731373d5114b81224f1201c6a39ccc1d8388c" +dependencies = [ + "downcast-rs", + "libm", + "memory_units", + "num-rational", + "num-traits", +] + +[[package]] +name = "casper-wasmi-validation" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f669d385132ce321a57fdf453588d69c01654e75991bee3d22392a3aaaad80bb" +dependencies = [ + "casper-wasm", ] [[package]] @@ -3166,10 +3205,6 @@ dependencies = [ "casper-types", ] -[[package]] -name = "parity-wasm" -version = "0.45.0" - [[package]] name = "parking_lot" version = "0.11.2" @@ -5297,9 +5332,9 @@ dependencies = [ [[package]] name = "warp" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba431ef570df1287f7f8b07e376491ad54f84d26ac473489427231e1718e1f69" +checksum = "c1e92e22e03ff1230c03a1a8ee37d2f89cd489e2e541b7550d6afad96faed169" dependencies = [ "async-compression", "bytes", @@ -5421,39 +5456,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "wasmi" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" -dependencies = [ - "parity-wasm", - "wasmi-validation", - "wasmi_core", -] - -[[package]] -name = "wasmi-validation" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "wasmi_core" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" -dependencies = [ - "downcast-rs", - "libm", - "memory_units", - "num-rational", - "num-traits", -] - [[package]] name = "wasmparser" version = "0.77.0" diff --git a/Cargo.toml b/Cargo.toml index ec6b18c2dd..3c4773e543 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,3 @@ lto = true [profile.bench] codegen-units = 1 lto = true - -[patch.crates-io] -parity-wasm = { git = "https://github.com/casper-network/casper-wasm.git", branch = "casper-0.45.0" } diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index 686adc1b4e..b345cf1780 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml 
@@ -16,11 +16,13 @@ base16 = "0.2.1" bincode = "1.3.1" casper-hashing = { version = "2.0.0", path = "../hashing" } casper-types = { version = "3.0.0", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema"] } -casper-wasm-utils = "2.0.0" +casper-wasm = { version = "0.46.0", default-features = false } +casper-wasm-utils = "3.0.0" +casper-wasmi = "0.13.2" datasize = "0.2.4" either = "1.8.1" -hex_fmt = "0.3.0" hex-buffer-serde = "0.2.1" +hex_fmt = "0.3.0" hostname = "0.3.0" humantime = "2" itertools = "0.10.0" @@ -34,7 +36,6 @@ num-rational = { version = "0.4.0", features = ["serde"] } num-traits = "0.2.10" num_cpus = "1" once_cell = "1.5.2" -parity-wasm = { version = "0.45.0", default-features = false } proptest = { version = "1.0.0", optional = true } rand = "0.8.3" rand_chacha = "0.3.0" @@ -47,7 +48,6 @@ thiserror = "1.0.18" tracing = "0.1.18" uint = "0.9.0" uuid = { version = "0.8.1", features = ["serde", "v4"] } -wasmi = "0.13.2" [dev-dependencies] assert_matches = "1.3.0" diff --git a/execution_engine/src/core/engine_state/error.rs b/execution_engine/src/core/engine_state/error.rs index ec4ddbeb36..f346e35201 100644 --- a/execution_engine/src/core/engine_state/error.rs +++ b/execution_engine/src/core/engine_state/error.rs @@ -33,7 +33,7 @@ pub enum Error { WasmPreprocessing(#[from] wasm_prep::PreprocessingError), /// WASM serialization error. #[error("Wasm serialization error: {0:?}")] - WasmSerialization(#[from] parity_wasm::SerializationError), + WasmSerialization(#[from] casper_wasm::SerializationError), /// Contract execution error. #[error(transparent)] Exec(execution::Error), diff --git a/execution_engine/src/core/execution/error.rs b/execution_engine/src/core/execution/error.rs index 5cb3af277c..223ce7c7b0 100644 --- a/execution_engine/src/core/execution/error.rs +++ b/execution_engine/src/core/execution/error.rs @@ -1,5 +1,5 @@ //! Execution error and supporting code. -use parity_wasm::elements; +use casper_wasm::elements; use thiserror::Error; use casper_types::{ @@ -197,10 +197,10 @@ impl Error { } } -impl wasmi::HostError for Error {} +impl casper_wasmi::HostError for Error {} -impl From for Error { - fn from(error: wasmi::Error) -> Self { +impl From for Error { + fn from(error: casper_wasmi::Error) -> Self { match error .as_host_error() .and_then(|host_error| host_error.downcast_ref::()) diff --git a/execution_engine/src/core/resolvers/memory_resolver.rs b/execution_engine/src/core/resolvers/memory_resolver.rs index 825c0b883b..9777997cd4 100644 --- a/execution_engine/src/core/resolvers/memory_resolver.rs +++ b/execution_engine/src/core/resolvers/memory_resolver.rs @@ -1,5 +1,5 @@ //! This module contains resolver of a memory section of the WASM code. 
-use wasmi::MemoryRef; +use casper_wasmi::MemoryRef; use super::error::ResolverError; diff --git a/execution_engine/src/core/resolvers/mod.rs b/execution_engine/src/core/resolvers/mod.rs index 0ccfa58648..c63889bfdd 100644 --- a/execution_engine/src/core/resolvers/mod.rs +++ b/execution_engine/src/core/resolvers/mod.rs @@ -4,7 +4,7 @@ pub mod memory_resolver; pub(crate) mod v1_function_index; mod v1_resolver; -use wasmi::ModuleImportResolver; +use casper_wasmi::ModuleImportResolver; use casper_types::ProtocolVersion; diff --git a/execution_engine/src/core/resolvers/v1_resolver.rs b/execution_engine/src/core/resolvers/v1_resolver.rs index 16619daf88..f6525898c8 100644 --- a/execution_engine/src/core/resolvers/v1_resolver.rs +++ b/execution_engine/src/core/resolvers/v1_resolver.rs @@ -1,6 +1,6 @@ use std::cell::RefCell; -use wasmi::{ +use casper_wasmi::{ memory_units::Pages, Error as InterpreterError, FuncInstance, FuncRef, MemoryDescriptor, MemoryInstance, MemoryRef, ModuleImportResolver, Signature, ValueType, }; diff --git a/execution_engine/src/core/runtime/args.rs b/execution_engine/src/core/runtime/args.rs index 17af96a8c0..5f27d2ceb2 100644 --- a/execution_engine/src/core/runtime/args.rs +++ b/execution_engine/src/core/runtime/args.rs @@ -1,4 +1,4 @@ -use wasmi::{FromValue, RuntimeArgs, Trap}; +use casper_wasmi::{FromValue, RuntimeArgs, Trap}; pub(crate) trait Args where diff --git a/execution_engine/src/core/runtime/externals.rs b/execution_engine/src/core/runtime/externals.rs index 369883fa6a..84790e6252 100644 --- a/execution_engine/src/core/runtime/externals.rs +++ b/execution_engine/src/core/runtime/externals.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeSet, convert::TryFrom}; -use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; +use casper_wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; use casper_types::{ account::AccountHash, diff --git a/execution_engine/src/core/runtime/mod.rs b/execution_engine/src/core/runtime/mod.rs index f9c1edc655..dba119b0e3 100644 --- a/execution_engine/src/core/runtime/mod.rs +++ b/execution_engine/src/core/runtime/mod.rs @@ -16,9 +16,9 @@ use std::{ iter::FromIterator, }; -use parity_wasm::elements::Module; +use casper_wasm::elements::Module; +use casper_wasmi::{MemoryRef, Trap, TrapCode}; use tracing::error; -use wasmi::{MemoryRef, Trap, TrapCode}; use casper_types::{ account::{Account, AccountHash, ActionType, Weight}, @@ -205,14 +205,14 @@ where self.try_get_memory()? .with_direct_access(|buffer| { let end = offset.checked_add(size).ok_or_else(|| { - wasmi::Error::Memory(format!( + casper_wasmi::Error::Memory(format!( "trying to access memory block of size {} from offset {}", size, offset )) })?; if end > buffer.len() { - return Err(wasmi::Error::Memory(format!( + return Err(casper_wasmi::Error::Memory(format!( "trying to access region [{}..{}] in memory [0..{}]", offset, end, @@ -1367,7 +1367,7 @@ where None => return Err(Error::KeyNotFound(context_key)), }; - parity_wasm::deserialize_buffer(contract_wasm.bytes())? + casper_wasm::deserialize_buffer(contract_wasm.bytes())? 
};
 
        let context = self.context.new_from_self(
diff --git a/execution_engine/src/core/runtime/utils.rs b/execution_engine/src/core/runtime/utils.rs
index ad514e3699..b0e73fe7f6 100644
--- a/execution_engine/src/core/runtime/utils.rs
+++ b/execution_engine/src/core/runtime/utils.rs
@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 
-use parity_wasm::elements::Module;
-use wasmi::{ImportsBuilder, MemoryRef, ModuleInstance, ModuleRef};
+use casper_wasm::elements::Module;
+use casper_wasmi::{ImportsBuilder, MemoryRef, ModuleInstance, ModuleRef};
 
 use casper_types::{
     contracts::NamedKeys, AccessRights, CLType, CLValue, Key, ProtocolVersion, PublicKey,
@@ -30,7 +30,7 @@ pub(super) fn instance_and_memory(
     protocol_version: ProtocolVersion,
     wasm_config: &WasmConfig,
 ) -> Result<(ModuleRef, MemoryRef), Error> {
-    let module = wasmi::Module::from_parity_wasm_module(parity_module)?;
+    let module = casper_wasmi::Module::from_casper_wasm_module(parity_module)?;
     let resolver = resolvers::create_module_resolver(protocol_version, wasm_config)?;
     let mut imports = ImportsBuilder::new();
     imports.push_resolver("env", &resolver);
diff --git a/execution_engine/src/shared/opcode_costs.rs b/execution_engine/src/shared/opcode_costs.rs
index 5d9ec9ec44..8da5449b5a 100644
--- a/execution_engine/src/shared/opcode_costs.rs
+++ b/execution_engine/src/shared/opcode_costs.rs
@@ -1,9 +1,9 @@
 //! Support for Wasm opcode costs.
 use std::{convert::TryInto, num::NonZeroU32};
 
+use casper_wasm::elements::Instruction;
 use casper_wasm_utils::rules::{MemoryGrowCost, Rules};
 use datasize::DataSize;
-use parity_wasm::elements::Instruction;
 use rand::{distributions::Standard, prelude::*, Rng};
 use serde::{Deserialize, Serialize};
 
diff --git a/execution_engine/src/shared/wasm_prep.rs b/execution_engine/src/shared/wasm_prep.rs
index 75063c1abb..2ed98cd4b1 100644
--- a/execution_engine/src/shared/wasm_prep.rs
+++ b/execution_engine/src/shared/wasm_prep.rs
@@ -1,8 +1,8 @@
 //! Preprocessing of Wasm modules.
-use casper_wasm_utils::{self, stack_height};
-use parity_wasm::elements::{
+use casper_wasm::elements::{
     self, External, Instruction, Internal, MemorySection, Module, Section, TableType, Type,
 };
+use casper_wasm_utils::{self, stack_height};
 use thiserror::Error;
 
 use super::wasm_config::WasmConfig;
@@ -405,7 +405,7 @@ pub fn preprocess(
 
 /// Returns a parity Module from the given bytes without making modifications or checking limits.
 pub fn deserialize(module_bytes: &[u8]) -> Result<Module, PreprocessingError> {
-    parity_wasm::deserialize_buffer::<Module>(module_bytes).map_err(Into::into)
+    casper_wasm::deserialize_buffer::<Module>(module_bytes).map_err(Into::into)
 }
 
 /// Creates new wasm module from entry points.
@@ -431,7 +431,7 @@ pub fn get_module_from_entry_points( Some(missing_name) => Err(execution::Error::FunctionNotFound(missing_name)), None => { casper_wasm_utils::optimize(&mut module, entry_point_names)?; - parity_wasm::serialize(module).map_err(execution::Error::ParityWasm) + casper_wasm::serialize(module).map_err(execution::Error::ParityWasm) } } } @@ -439,7 +439,7 @@ pub fn get_module_from_entry_points( #[cfg(test)] mod tests { use casper_types::contracts::DEFAULT_ENTRY_POINT_NAME; - use parity_wasm::{ + use casper_wasm::{ builder, elements::{CodeSection, Instructions}, }; @@ -484,7 +484,7 @@ mod tests { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let error = preprocess(WasmConfig::default(), &module_bytes) .expect_err("should fail with an error"); assert!( @@ -523,7 +523,7 @@ mod tests { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let error = preprocess(WasmConfig::default(), &module_bytes) .expect_err("should fail with an error"); assert!( @@ -559,7 +559,7 @@ mod tests { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let error = preprocess(WasmConfig::default(), &module_bytes) .expect_err("should fail with an error"); assert!( @@ -580,7 +580,7 @@ mod tests { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let error = preprocess(WasmConfig::default(), &module_bytes) .expect_err("should fail with an error"); @@ -603,7 +603,7 @@ mod tests { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let error = preprocess(WasmConfig::default(), &module_bytes) .expect_err("should fail with an error"); assert!( diff --git a/execution_engine/src/storage/error/lmdb.rs b/execution_engine/src/storage/error/lmdb.rs index 76966ad03c..f3eb8828ec 100644 --- a/execution_engine/src/storage/error/lmdb.rs +++ b/execution_engine/src/storage/error/lmdb.rs @@ -28,7 +28,7 @@ pub enum Error { CommitError(#[from] CommitError), } -impl wasmi::HostError for Error {} +impl casper_wasmi::HostError for Error {} impl From for Error { fn from(error: bytesrepr::Error) -> Self { diff --git a/execution_engine_testing/tests/Cargo.toml b/execution_engine_testing/tests/Cargo.toml index e9aee94e1f..66154dfdd6 100644 --- a/execution_engine_testing/tests/Cargo.toml +++ b/execution_engine_testing/tests/Cargo.toml @@ -10,10 +10,10 @@ casper-engine-test-support = { path = "../test_support", features = ["test-suppo casper-execution-engine = { path = "../../execution_engine", features = ["test-support"] } casper-hashing = { path = "../../hashing" } casper-types = { path = "../../types", features = ["datasize", "json-schema"] } +casper-wasm = "0.46.0" clap = "2" fs_extra = "1.2.0" log = "0.4.8" -parity-wasm = "0.45.0" rand = "0.8.3" serde = "1" serde_json = "1" diff --git a/execution_engine_testing/tests/src/test/gas_counter.rs b/execution_engine_testing/tests/src/test/gas_counter.rs index facb0c5720..a9fe63671f 100644 --- 
a/execution_engine_testing/tests/src/test/gas_counter.rs +++ b/execution_engine_testing/tests/src/test/gas_counter.rs @@ -1,5 +1,5 @@ use assert_matches::assert_matches; -use parity_wasm::{ +use casper_wasm::{ builder, elements::{BlockType, Instruction, Instructions}, }; @@ -33,7 +33,7 @@ fn make_session_code_with(instructions: Vec<Instruction>) -> Vec<u8> { .memory() .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] diff --git a/execution_engine_testing/tests/src/test/regression/ee_1129.rs b/execution_engine_testing/tests/src/test/regression/ee_1129.rs index 6ec90ed1a7..94ab42bdcd 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1129.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1129.rs @@ -1,6 +1,6 @@ +use casper_wasm::builder; use num_traits::Zero; use once_cell::sync::Lazy; -use parity_wasm::builder; use casper_engine_test_support::{ utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, @@ -438,7 +438,7 @@ fn do_nothing_without_memory() -> Vec<u8> { .field(DEFAULT_ENTRY_POINT_NAME) .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] diff --git a/execution_engine_testing/tests/src/test/regression/ee_890.rs b/execution_engine_testing/tests/src/test/regression/ee_890.rs index 6a6832d13d..b8d0f1b48c 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_890.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_890.rs @@ -1,4 +1,4 @@ -use parity_wasm::{self, builder}; +use casper_wasm::{self, builder}; use casper_engine_test_support::{ DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, @@ -32,7 +32,7 @@ fn make_do_nothing_with_start() -> Vec<u8> { .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] diff --git a/execution_engine_testing/tests/src/test/regression/ee_966.rs b/execution_engine_testing/tests/src/test/regression/ee_966.rs index acbe26d3c3..f7b73c2f05 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_966.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_966.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; +use casper_wasm::builder; use once_cell::sync::Lazy; -use parity_wasm::builder; use casper_engine_test_support::{ DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, @@ -73,7 +73,7 @@ fn make_session_code_with_memory_pages(initial_pages: u32, max_pages: Option<u32>) -> Vec<u8> { .memory() .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } fn execute_request_with_memory_pages(initial_pages: u32, max_pages: Option<u32>) -> ExecuteRequest { diff --git a/execution_engine_testing/tests/src/test/regression/regression_20210924.rs b/execution_engine_testing/tests/src/test/regression/regression_20210924.rs index 5e3d8c16e0..37063dee95 100644 --- a/execution_engine_testing/tests/src/test/regression/regression_20210924.rs +++ b/execution_engine_testing/tests/src/test/regression/regression_20210924.rs @@ -9,7 +9,7 @@ use casper_execution_engine::{ shared::opcode_costs::DEFAULT_NOP_COST, }; use casper_types::{contracts::DEFAULT_ENTRY_POINT_NAME, runtime_args, Gas, RuntimeArgs, U512}; -use parity_wasm::{ +use casper_wasm::{ builder, elements::{Instruction, Instructions}, }; @@ -37,7 +37,7 @@ pub fn do_minimum_bytes() -> Vec<u8> { .memory() .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] diff
--git a/execution_engine_testing/tests/src/test/regression/regression_20220727.rs b/execution_engine_testing/tests/src/test/regression/regression_20220727.rs index f352639091..52d23db681 100644 --- a/execution_engine_testing/tests/src/test/regression/regression_20220727.rs +++ b/execution_engine_testing/tests/src/test/regression/regression_20220727.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use parity_wasm::{ +use casper_wasm::{ builder, elements::{Instruction, Instructions}, }; @@ -275,7 +275,7 @@ fn should_not_allow_more_than_one_table() { .memory() .build() .build(); - let module_bytes = parity_wasm::serialize(module).expect("should serialize"); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); let exec_request = ExecuteRequestBuilder::module_bytes( *DEFAULT_ACCOUNT_ADDR, diff --git a/execution_engine_testing/tests/src/test/regression/test_utils.rs b/execution_engine_testing/tests/src/test/regression/test_utils.rs index 0a8ade8c62..f398641e16 100644 --- a/execution_engine_testing/tests/src/test/regression/test_utils.rs +++ b/execution_engine_testing/tests/src/test/regression/test_utils.rs @@ -1,5 +1,5 @@ use casper_types::contracts::DEFAULT_ENTRY_POINT_NAME; -use parity_wasm::{ +use casper_wasm::{ builder, elements::{Instruction, Instructions}, }; @@ -38,7 +38,7 @@ pub(crate) fn make_gas_counter_overflow() -> Vec<u8> { .memory() .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } /// Prepare malicious payload in a form of a wasm module without memory section. @@ -67,7 +67,7 @@ pub(crate) fn make_module_without_memory_section() -> Vec<u8> { .field(DEFAULT_ENTRY_POINT_NAME) .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } /// Prepare malicious payload in a form of a wasm module with forbidden start section. diff --git a/execution_engine_testing/tests/src/wasm_utils.rs b/execution_engine_testing/tests/src/wasm_utils.rs index d8c30ff298..8d810e3cba 100644 --- a/execution_engine_testing/tests/src/wasm_utils.rs +++ b/execution_engine_testing/tests/src/wasm_utils.rs @@ -1,7 +1,7 @@ //! Wasm helpers. use std::fmt::Write; -use parity_wasm::builder; +use casper_wasm::builder; use casper_types::contracts::DEFAULT_ENTRY_POINT_NAME; @@ -23,7 +23,7 @@ pub fn do_nothing_bytes() -> Vec<u8> { .memory() .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } /// Creates minimal session code that contains a function with arbitrary number of parameters.
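Note on the hunk in execution_engine/src/core/runtime/mod.rs above: the overflow-checked bounds test performed before handing out a region of linear memory can be sketched in isolation as follows. This is a minimal sketch, assuming a plain Vec<u8>-backed buffer and a hypothetical MemoryError type standing in for casper_wasmi::Error::Memory; it is not code from the repository.

#[derive(Debug)]
struct MemoryError(String); // illustrative stand-in for casper_wasmi::Error::Memory

/// Validate the requested range with overflow-checked arithmetic, then hand
/// the caller a borrowed slice of linear memory instead of an owned copy.
fn checked_memory_slice(buffer: &[u8], offset: usize, size: usize) -> Result<&[u8], MemoryError> {
    // `checked_add` mirrors the hunk above: reject ranges whose end overflows.
    let end = offset.checked_add(size).ok_or_else(|| {
        MemoryError(format!(
            "trying to access memory block of size {} from offset {}",
            size, offset
        ))
    })?;
    // Reject ranges that run past the end of linear memory.
    if end > buffer.len() {
        return Err(MemoryError(format!(
            "trying to access region [{}..{}] in memory [0..{}]",
            offset,
            end,
            buffer.len()
        )));
    }
    // Borrow rather than copy: callers can deserialize straight from this slice.
    Ok(&buffer[offset..end])
}

fn main() {
    let linear_memory = vec![0u8; 65_536]; // one 64 KiB Wasm page
    assert!(checked_memory_slice(&linear_memory, 0, 32).is_ok());
    assert!(checked_memory_slice(&linear_memory, usize::MAX, 2).is_err()); // overflow caught
    assert!(checked_memory_slice(&linear_memory, 65_530, 100).is_err()); // out of bounds
}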
From ced9145b683153fe5fccdbc8addd17a1df1a14d0 Mon Sep 17 00:00:00 2001 From: Fraser Hutchison Date: Wed, 18 Oct 2023 17:06:43 +0100 Subject: [PATCH 41/41] version bumps --- Cargo.lock | 148 ++++++++---------- execution_engine/CHANGELOG.md | 11 +- execution_engine/Cargo.toml | 2 +- execution_engine/src/lib.rs | 2 +- .../test_support/CHANGELOG.md | 8 + .../test_support/Cargo.toml | 4 +- .../test_support/src/lib.rs | 2 +- node/CHANGELOG.md | 2 +- node/Cargo.toml | 2 +- 9 files changed, 86 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b36b623d00..8179637e5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -375,9 +375,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.1" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1ce199063694f33ffb7dd4e0ee620741495c32833cde5aa08f02a0bf96f0c8" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" @@ -403,7 +403,7 @@ dependencies = [ [[package]] name = "casper-engine-test-support" -version = "5.0.0" +version = "6.0.0" dependencies = [ "casper-execution-engine", "casper-hashing", @@ -457,7 +457,7 @@ dependencies = [ [[package]] name = "casper-execution-engine" -version = "5.0.0" +version = "6.0.0" dependencies = [ "anyhow", "assert_matches", @@ -1756,25 +1756,14 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.1" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys 0.48.0", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "expensive-calculation" version = "0.1.0" @@ -2323,9 +2312,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -2527,7 +2516,7 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] @@ -2553,7 +2542,7 @@ version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ - "hermit-abi 0.3.1", + "hermit-abi 0.3.3", "io-lifetimes", "rustix", "windows-sys 0.48.0", @@ -2594,9 +2583,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -4100,11 +4089,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -4146,9 +4135,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags", "core-foundation", @@ -4159,9 +4148,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -5370,9 +5359,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -5380,24 +5369,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2 1.0.56", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.15", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5407,9 +5396,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote 1.0.26", "wasm-bindgen-macro-support", @@ -5417,22 +5406,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.56", "quote 1.0.26", - "syn 1.0.109", + "syn 2.0.15", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" 
[[package]] name = "wasm-encoder" @@ -5485,9 +5474,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -5529,9 +5518,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -5542,21 +5531,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.45.0" @@ -5572,7 +5546,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.5", ] [[package]] @@ -5592,17 +5566,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -5613,9 +5587,9 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -5625,9 +5599,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -5637,9 +5611,9 @@ checksum = 
"c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -5649,9 +5623,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -5661,9 +5635,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -5673,9 +5647,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -5685,9 +5659,9 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winreg" diff --git a/execution_engine/CHANGELOG.md b/execution_engine/CHANGELOG.md index fb2b720679..df2c209c75 100644 --- a/execution_engine/CHANGELOG.md +++ b/execution_engine/CHANGELOG.md @@ -10,10 +10,19 @@ All notable changes to this project will be documented in this file. The format [comment]: <> (Security: in case of vulnerabilities) -## [Unreleased] + +## 6.0.0 ### Changed * Default value for `max_stack_height` is increased to 500. +* Replaced usage of `parity-wasm` and `wasmi` with Casper forks `casper-wasm` and `casper-wasmi` respectively. + +### Fixed +* Fix incorrect handling of unbonding purses for validators that were also evicted in that era. +* Fix issue with one-time code used for migrating data to support redelegations. + +### Security +* Fix unbounded memory allocation issue while parsing Wasm. diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index b345cf1780..8fe42e7a4e 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "casper-execution-engine" -version = "5.0.0" # when updating, also update 'html_root_url' in lib.rs +version = "6.0.0" # when updating, also update 'html_root_url' in lib.rs authors = ["Henry Till ", "Ed Hastings "] edition = "2018" description = "CasperLabs execution engine crates." 
diff --git a/execution_engine/src/lib.rs b/execution_engine/src/lib.rs index 572c2e65ea..f99a2ffc6d 100644 --- a/execution_engine/src/lib.rs +++ b/execution_engine/src/lib.rs @@ -1,6 +1,6 @@ //! The engine which executes smart contracts on the Casper network. -#![doc(html_root_url = "https://docs.rs/casper-execution-engine/5.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-execution-engine/6.0.0")] #![doc( html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", diff --git a/execution_engine_testing/test_support/CHANGELOG.md b/execution_engine_testing/test_support/CHANGELOG.md index 0f6f56cb6c..802066f032 100644 --- a/execution_engine_testing/test_support/CHANGELOG.md +++ b/execution_engine_testing/test_support/CHANGELOG.md @@ -11,6 +11,14 @@ All notable changes to this project will be documented in this file. The format +## 6.0.0 + +### Changed +* Update `casper-execution-engine` dependency. +* Handle evict items in the `WasmTestBuilder` when advancing eras or calling `step`. + + + ## 5.0.0 ### Added diff --git a/execution_engine_testing/test_support/Cargo.toml b/execution_engine_testing/test_support/Cargo.toml index 75427d4788..ddebcfa6eb 100644 --- a/execution_engine_testing/test_support/Cargo.toml +++ b/execution_engine_testing/test_support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "casper-engine-test-support" -version = "5.0.0" # when updating, also update 'html_root_url' in lib.rs +version = "6.0.0" # when updating, also update 'html_root_url' in lib.rs authors = ["Fraser Hutchison "] edition = "2018" description = "Library to support testing of Wasm smart contracts for use on the Casper network." @@ -11,7 +11,7 @@ repository = "https://github.com/CasperLabs/casper-node/tree/master/execution_en license = "Apache-2.0" [dependencies] -casper-execution-engine = { version = "5.0.0", path = "../../execution_engine", features = ["test-support"] } +casper-execution-engine = { version = "6.0.0", path = "../../execution_engine", features = ["test-support"] } casper-hashing = { version = "2.0.0", path = "../../hashing" } casper-types = { version = "3.0.0", path = "../../types" } humantime = "2" diff --git a/execution_engine_testing/test_support/src/lib.rs b/execution_engine_testing/test_support/src/lib.rs index 8171a6f21c..2802d5d0f6 100644 --- a/execution_engine_testing/test_support/src/lib.rs +++ b/execution_engine_testing/test_support/src/lib.rs @@ -1,6 +1,6 @@ //! A library to support testing of Wasm smart contracts for use on the Casper Platform. -#![doc(html_root_url = "https://docs.rs/casper-engine-test-support/5.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-engine-test-support/6.0.0")] #![doc( html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md index 6e1f74d5ad..3c78eb4a10 100644 --- a/node/CHANGELOG.md +++ b/node/CHANGELOG.md @@ -11,7 +11,7 @@ All notable changes to this project will be documented in this file. The format -## Unreleased +## 1.5.3 ### Added * Add `deploy_acceptor` section to config with a single option `timestamp_leeway` to allow a small leeway when deciding if a deploy is future-dated. 
diff --git a/node/Cargo.toml b/node/Cargo.toml index fd096e2236..2b9dfae585 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -21,7 +21,7 @@ base16 = "0.2.1" base64 = "0.13.0" bincode = "1" bytes = "1.0.1" -casper-execution-engine = { version = "5.0.0", path = "../execution_engine" } +casper-execution-engine = { version = "6.0.0", path = "../execution_engine" } casper-hashing = { version = "2.0.0", path = "../hashing" } casper-json-rpc = { version = "1.1.0", path = "../json_rpc" } casper-types = { version = "3.0.0", path = "../types", features = ["datasize", "json-schema", "std"] }
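Note for downstream users: migrating a consumer crate from parity-wasm to casper-wasm follows the same mechanical rename applied throughout this series. The sketch below assumes the version pinned above (casper-wasm = "0.46.0") and is modeled on the test helpers in these patches; the exact builder chain, the helper name, and the "call" export (the engine's default entry point name) are illustrative assumptions, not code copied from the repository.

// Cargo.toml: swap the dependency, keeping the source-compatible fork.
// -parity-wasm = "0.45.0"
// +casper-wasm = "0.46.0"

use casper_wasm::{
    builder,
    elements::{Instruction, Instructions},
};

/// Build and serialize a trivial do-nothing session module. The casper-wasm
/// builder API is source-compatible with parity-wasm 0.45, so beyond the
/// dependency swap only the crate paths in `use` statements change.
fn do_nothing_module_bytes() -> Vec<u8> {
    let module = builder::module()
        // One empty function: () -> (), body is a single `end`.
        .function()
        .signature()
        .build()
        .body()
        .with_instructions(Instructions::new(vec![Instruction::End]))
        .build()
        .build()
        // Export function 0 as "call" so the engine can find an entry point.
        .export()
        .field("call")
        .internal()
        .func(0)
        .build()
        // Include a memory section so preprocessing does not reject the module.
        .memory()
        .build()
        .build();
    casper_wasm::serialize(module).expect("should serialize")
}

fn main() {
    let bytes = do_nothing_module_bytes();
    assert_eq!(&bytes[..4], b"\0asm"); // Wasm magic number
}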