From 7fd2b50817c9ee917a197cb54c27927299deb201 Mon Sep 17 00:00:00 2001 From: Mr Walker Date: Thu, 16 May 2024 23:41:54 +0100 Subject: [PATCH] Fact checker and fact calculation --- Cargo.lock | 12 +- Cargo.toml | 1 + src/fact_checker/error.rs | 49 ++++++++ src/fact_checker/fact_info.rs | 72 +++++++++++ src/fact_checker/fact_node.rs | 119 ++++++++++++++++++ src/fact_checker/fact_topology.rs | 113 +++++++++++++++++ .../fact_checker.rs => fact_checker/mod.rs} | 11 +- src/main.rs | 2 + src/provers/error.rs | 10 +- src/provers/mod.rs | 2 +- src/provers/sharp/error.rs | 9 +- src/provers/sharp/mod.rs | 26 ++-- 12 files changed, 402 insertions(+), 24 deletions(-) create mode 100644 src/fact_checker/error.rs create mode 100644 src/fact_checker/fact_info.rs create mode 100644 src/fact_checker/fact_node.rs create mode 100644 src/fact_checker/fact_topology.rs rename src/{provers/sharp/fact_checker.rs => fact_checker/mod.rs} (77%) diff --git a/Cargo.lock b/Cargo.lock index b97f11e8..33df0bc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4761,7 +4761,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -5074,6 +5074,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -5378,6 +5387,7 @@ dependencies = [ "dotenvy", "futures", "hex", + "itertools 0.13.0", "madara-prover-common", "madara-prover-rpc-client", "mongodb", diff --git a/Cargo.toml b/Cargo.toml index fa19015e..af0907a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ url = { version = "2.5.0", features = ["serde"] } uuid = { version = "1.7.0", features = ["v4", "serde"] } stark_evm_adapter = "0.1.1" hex = "0.4" +itertools = "0.13.0" # Cairo VM cairo-vm = { git = 
"https://github.com/lambdaclass/cairo-vm" } diff --git a/src/fact_checker/error.rs b/src/fact_checker/error.rs new file mode 100644 index 00000000..db88618e --- /dev/null +++ b/src/fact_checker/error.rs @@ -0,0 +1,49 @@ +use cairo_vm::program_hash::ProgramHashError; + +#[derive(Debug, thiserror::Error)] +pub enum FactCheckerError { + #[error("Fact registry call failed: {0}")] + FactRegistry(#[source] alloy::contract::Error), + #[error("Failed to compute program hash: {0}")] + ProgramHashCompute(#[from] ProgramHashError), + #[error("Tree structure length is not even")] + TreeStructureLenOdd, + #[error("Tree structure is empty")] + TreeStructureEmpty, + #[error("Tree structure is too large")] + TreeStructureTooLarge, + #[error("Tree structure contains invalid values")] + TreeStructureInvalid, + #[error("Output pages length is unexpected")] + OutputPagesLenUnexpected, + #[error("Output page {0} has invalid start {1} (expected 0 < x < {2})")] + OutputPagesInvalidStart(usize, usize, usize), + #[error("Output page {0} has unexpected start {1} (expected {2})")] + OutputPagesUnexpectedStart(usize, usize, usize), + #[error("Output page {0} has invalid size {1} (expected 0 < x < {2})")] + OutputPagesInvalidSize(usize, usize, usize), + #[error("Output page {0} has unexpected id (expected {1})")] + OutputPagesUnexpectedId(usize, usize), + #[error("Output pages cover only {0} out of {1} output elements")] + OutputPagesUncoveredOutput(usize, usize), + #[error("Output segment is not found in the memory")] + OutputSegmentNotFound, + #[error("Output segment does not fit into the memory")] + OutputSegmentInvalidRange, + #[error("Output segment contains inconsistent offset {0} (expected {1})")] + OutputSegmentInconsistentOffset(usize, usize), + #[error("Output segment contains unexpected relocatable at position {0}")] + OutputSegmentUnexpectedRelocatable(usize), + #[error("Tree structure: pages count {0} is in invalid range (expected <= {1})")] + TreeStructurePagesCountOutOfRange(usize,
usize), + #[error("Tree structure: nodes count {0} is in invalid range (expected <= {1})")] + TreeStructureNodesCountOutOfRange(usize, usize), + #[error("Tree structure: node stack contains more than one node")] + TreeStructureRootInvalid, + #[error("Tree structure: {0} pages were not processed")] + TreeStructurePagesNotProcessed(usize), + #[error("Tree structure: end offset {0} does not match the output length {1}")] + TreeStructureEndOffsetInvalid(usize, usize), + #[error("Tree structure: root offset {0} does not match the output length {1}")] + TreeStructureRootOffsetInvalid(usize, usize), +} diff --git a/src/fact_checker/fact_info.rs b/src/fact_checker/fact_info.rs new file mode 100644 index 00000000..727e18ec --- /dev/null +++ b/src/fact_checker/fact_info.rs @@ -0,0 +1,72 @@ +//! Fact info structure and helpers. +//! +//! Port of https://github.com/starkware-libs/cairo-lang/blob/master/src/starkware/cairo/bootloaders/generate_fact.py + +use alloy::primitives::{keccak256, B256}; +use cairo_vm::{ + program_hash::compute_program_hash_chain, + types::{builtin_name::BuiltinName, relocatable::MaybeRelocatable}, + vm::runners::cairo_pie::CairoPie, + Felt252, +}; +use starknet::core::types::FieldElement; + +use super::{ + error::FactCheckerError, + fact_node::generate_merkle_root, + fact_topology::{get_fact_topology, FactTopology}, +}; + +pub const BOOTLOADER_VERSION: usize = 1; + +pub struct FactInfo { + pub program_output: Vec<Felt252>, + pub fact_topology: FactTopology, + pub fact: B256, +} + +pub fn get_fact_info(cairo_pie: &CairoPie, program_hash: Option<FieldElement>) -> Result<FactInfo, FactCheckerError> { + let program_output = get_program_output(cairo_pie)?; + let fact_topology = get_fact_topology(cairo_pie, program_output.len())?; + let program_hash = match program_hash { + Some(hash) => hash, + None => compute_program_hash_chain(&cairo_pie.metadata.program, BOOTLOADER_VERSION)?, + }; + let output_root = generate_merkle_root(&program_output, &fact_topology)?; + let fact =
keccak256([program_hash.to_bytes_be(), *output_root.node_hash].concat()); + Ok(FactInfo { program_output, fact_topology, fact }) +} + +pub fn get_program_output(cairo_pie: &CairoPie) -> Result<Vec<Felt252>, FactCheckerError> { + let segment_info = cairo_pie.metadata.builtin_segments.get(&BuiltinName::output).unwrap(); + + let segment_start = cairo_pie + .memory + .0 + .iter() + .enumerate() + .find_map(|(ptr, ((index, _), _))| if *index == segment_info.index as usize { Some(ptr) } else { None }) + .ok_or(FactCheckerError::OutputSegmentNotFound)?; + + let mut output = Vec::with_capacity(segment_info.size); + let mut expected_offset = 0; + + for i in segment_start..segment_start + segment_info.size { + let ((_, offset), value) = cairo_pie.memory.0.get(i).ok_or(FactCheckerError::OutputSegmentInvalidRange)?; + + if *offset != expected_offset { + return Err(FactCheckerError::OutputSegmentInconsistentOffset(*offset, expected_offset)); + } + + match value { + MaybeRelocatable::Int(felt) => output.push(felt.clone()), + MaybeRelocatable::RelocatableValue(_) => { + return Err(FactCheckerError::OutputSegmentUnexpectedRelocatable(*offset)) + } + } + + expected_offset += 1; + } + + Ok(output) +} diff --git a/src/fact_checker/fact_node.rs b/src/fact_checker/fact_node.rs new file mode 100644 index 00000000..edb61710 --- /dev/null +++ b/src/fact_checker/fact_node.rs @@ -0,0 +1,119 @@ +//! Fact node structure and helpers. +//! +//! The fact of each task is stored as a (non-binary) Merkle tree. +//! Leaf nodes are labeled with the hash of their data. +//! Each non-leaf node is labeled as 1 + the hash of (node0, end0, node1, end1, ...) +//! where node* is a label of a child node and end* is the total number of data words up to +//! and including that node and its children (including the previous sibling nodes). +//! We add 1 to the result of the hash to prevent an attacker from using a preimage of a leaf node +//! as a preimage of a non-leaf hash and vice versa. +//! +//!
The structure of the tree is passed as a list of pairs (n_pages, n_nodes), and the tree is +//! constructed using a stack of nodes (initialized to an empty stack) by repeating for each pair: +//! 1. Add #n_pages leaf nodes to the stack. +//! 2. Pop the top #n_nodes, construct a parent node for them, and push it back to the stack. +//! After applying the steps above, the stack must contain exactly one node, which will +//! constitute the root of the Merkle tree. +//! +//! For example, [(2, 2)] will create a Merkle tree with a root and two direct children, while +//! [(3, 2), (0, 2)] will create a Merkle tree with a root whose left child is a leaf and +//! right child has two leaf children. +//! +//! Port of https://github.com/starkware-libs/cairo-lang/blob/master/src/starkware/cairo/bootloaders/compute_fact.py + +use alloy::primitives::{keccak256, B256}; +use cairo_vm::Felt252; +use itertools::Itertools; + +use super::{error::FactCheckerError, fact_topology::FactTopology}; + +/// Node of the fact tree +#[derive(Debug, Clone)] +pub struct FactNode { + /// Page hash (leaf) or 1 + keccak{children} (non-leaf) + pub node_hash: B256, + /// Total number of data words up to that node (including it and its children) + pub end_offset: usize, + /// Page size + pub page_size: usize, + /// Child nodes + pub children: Vec<FactNode>, +} + +/// Generates the root of the output Merkle tree for the program fact computation. +/// +/// Basically it transforms the flat fact topology into a non-binary Merkle tree and then computes its root, +/// enriching the nodes with metadata such as page sizes and hashes.
+pub fn generate_merkle_root( + program_output: &[Felt252], + fact_topology: &FactTopology, +) -> Result<FactNode, FactCheckerError> { + let FactTopology { tree_structure, mut page_sizes } = fact_topology.clone(); + + let mut end_offset: usize = 0; + let mut node_stack: Vec<FactNode> = Vec::with_capacity(page_sizes.len()); + let mut output_iter = program_output.iter(); + + for (n_pages, n_nodes) in tree_structure.into_iter().tuples() { + if n_pages > page_sizes.len() { + return Err(FactCheckerError::TreeStructurePagesCountOutOfRange(n_pages, page_sizes.len())); + } + + // Push n_pages (leaves) to the stack + for _ in 0..n_pages { + let page_size = page_sizes.remove(0); + // Page size is already validated upon retrieving the topology + let page = output_iter.by_ref().take(page_size).map(|felt| felt.to_bytes_be().to_vec()).concat(); + let node_hash = keccak256(&page); + end_offset += page_size; + // Add leaf node (no children) + node_stack.push(FactNode { node_hash, end_offset, page_size, children: vec![] }) + } + + if n_nodes > node_stack.len() { + return Err(FactCheckerError::TreeStructureNodesCountOutOfRange(n_nodes, node_stack.len())); + } + + if n_nodes > 0 { + // Create a parent node to the last n_nodes in the head of the stack.
+ let children: Vec<FactNode> = node_stack.drain(node_stack.len() - n_nodes..).collect(); + + let mut node_data = Vec::with_capacity(2 * 32 * children.len()); + let mut total_page_size = 0; + let mut child_end_offset = 0; + + for node in children.iter() { + node_data.extend_from_slice(node.node_hash.as_slice()); + node_data.extend_from_slice(&[0; 32 - (usize::BITS / 8) as usize]); // pad usize to 32 bytes + node_data.extend_from_slice(&node.end_offset.to_be_bytes()); // (node_hash, end_offset) pairs per the GPS fact spec + total_page_size += node.page_size; + child_end_offset = node.end_offset; + } + + node_stack.push(FactNode { + node_hash: B256::from(alloy::primitives::U256::from_be_bytes(keccak256(&node_data).0).wrapping_add(alloy::primitives::U256::from(1u8))), // 1 + keccak distinguishes non-leaf from leaf preimages + end_offset: child_end_offset, + page_size: total_page_size, + children, + }) + } + } + + if node_stack.len() != 1 { + return Err(FactCheckerError::TreeStructureRootInvalid); + } + if page_sizes.len() > 0 { + return Err(FactCheckerError::TreeStructurePagesNotProcessed(page_sizes.len())); + } + if end_offset != program_output.len() { + return Err(FactCheckerError::TreeStructureEndOffsetInvalid(end_offset, program_output.len())); + } + if node_stack[0].end_offset != program_output.len() { + return Err(FactCheckerError::TreeStructureRootOffsetInvalid( + node_stack[0].end_offset, + program_output.len(), + )); + } + + Ok(node_stack.remove(0)) +} diff --git a/src/fact_checker/fact_topology.rs b/src/fact_checker/fact_topology.rs new file mode 100644 index 00000000..54b0d895 --- /dev/null +++ b/src/fact_checker/fact_topology.rs @@ -0,0 +1,113 @@ +//! Fact topology type and helpers. +//! +//!
Ported from https://github.com/starkware-libs/cairo-lang/blob/master/src/starkware/cairo/bootloaders/fact_topology.py + +use std::collections::HashMap; + +use cairo_vm::{ + types::builtin_name::BuiltinName, + vm::runners::cairo_pie::{BuiltinAdditionalData, CairoPie, PublicMemoryPage}, +}; + +use super::error::FactCheckerError; + +pub const GPS_FACT_TOPOLOGY: &str = "gps_fact_topology"; + +/// Flattened fact tree +#[derive(Debug, Clone)] +pub struct FactTopology { + /// List of pairs (n_pages, n_nodes) + pub tree_structure: Vec<usize>, + /// Page sizes (pages are leaf nodes) + pub page_sizes: Vec<usize>, +} + +/// Returns the fact topology from the additional data of the output builtin. +pub fn get_fact_topology(cairo_pie: &CairoPie, output_size: usize) -> Result<FactTopology, FactCheckerError> { + if let Some(BuiltinAdditionalData::Output(additional_data)) = cairo_pie.additional_data.0.get(&BuiltinName::output) + { + let tree_structure = match additional_data.attributes.get(GPS_FACT_TOPOLOGY) { + Some(tree_structure) => { + if tree_structure.is_empty() { + return Err(FactCheckerError::TreeStructureEmpty); + } + if tree_structure.len() % 2 != 0 { + return Err(FactCheckerError::TreeStructureLenOdd); + } + if tree_structure.len() > 10 { + return Err(FactCheckerError::TreeStructureTooLarge); + } + if tree_structure.iter().any(|&x| x >= 1 << 30) { // values must be < 2^30 (cairo-lang fact_topology) + return Err(FactCheckerError::TreeStructureInvalid); + } + tree_structure.clone() + } + None => { + if additional_data.pages.len() > 0 { + return Err(FactCheckerError::OutputPagesLenUnexpected); + } + vec![1, 0] + } + }; + let page_sizes = get_page_sizes(&additional_data.pages, output_size)?; + Ok(FactTopology { tree_structure, page_sizes }) + } else { + panic!("Output builtin additional data is missing from the Cairo PIE") + } +} + +/// Returns the sizes of the program output pages, given the pages dictionary that appears +/// in the additional attributes of the output builtin.
+pub fn get_page_sizes( + pages: &HashMap<usize, PublicMemoryPage>, + output_size: usize, +) -> Result<Vec<usize>, FactCheckerError> { + let mut pages_list: Vec<(usize, usize, usize)> = + pages.iter().map(|(&id, page)| (id, page.start, page.size)).collect(); + pages_list.sort(); + + // The first page id is expected to be 1. + let mut expected_page_id = 1; + // We don't expect anything on its start value. + let mut expected_page_start = None; + + let mut page_sizes = Vec::with_capacity(pages_list.len() + 1); + // The size of page 0 is output_size if there are no other pages, or the start of page 1 otherwise. + page_sizes.push(output_size); + + for (page_id, page_start, page_size) in pages_list { + if page_id != expected_page_id { + return Err(FactCheckerError::OutputPagesUnexpectedId(page_id, expected_page_id)); + } + + if page_id == 1 { + if page_start == 0 || page_start >= output_size { + return Err(FactCheckerError::OutputPagesInvalidStart(page_id, page_start, output_size)); + } + page_sizes[0] = page_start; + } else { + if Some(page_start) != expected_page_start { + return Err(FactCheckerError::OutputPagesUnexpectedStart( + page_id, + page_start, + expected_page_start.unwrap_or_default(), + )); + } + } + + if page_size == 0 || page_size >= output_size { + return Err(FactCheckerError::OutputPagesInvalidSize(page_id, page_size, output_size)); + } + + expected_page_start = Some(page_start + page_size); + expected_page_id += 1; + + page_sizes.push(page_size); + } + + if !pages.is_empty() && expected_page_start != Some(output_size) { + return Err(FactCheckerError::OutputPagesUncoveredOutput(expected_page_start.unwrap_or_default(), output_size)); + } + + Ok(page_sizes) +} diff --git a/src/provers/sharp/fact_checker.rs b/src/fact_checker/mod.rs similarity index 77% rename from src/provers/sharp/fact_checker.rs rename to src/fact_checker/mod.rs index 2b43299f..9a6f9e10 100644 --- a/src/provers/sharp/fact_checker.rs +++ b/src/fact_checker/mod.rs @@ -1,3 +1,8 @@ +pub mod error; +pub mod fact_info; +pub mod
fact_node; +pub mod fact_topology; + use alloy::primitives::{Address, B256}; use alloy::providers::{ProviderBuilder, RootProvider}; use alloy::transports::http::{Client, Http}; @@ -5,7 +10,7 @@ use url::Url; use crate::contracts::FactRegistry::{self, FactRegistryInstance}; -use super::error::SharpError; +use self::error::FactCheckerError; pub struct FactChecker { fact_registry: FactRegistryInstance<Http<Client>, RootProvider<Http<Client>>>, } @@ -21,9 +26,9 @@ impl FactChecker { Self { fact_registry } } - pub async fn is_valid(&self, fact: B256) -> Result<bool, SharpError> { + pub async fn is_valid(&self, fact: &B256) -> Result<bool, FactCheckerError> { let FactRegistry::isValidReturn { _0 } = - self.fact_registry.isValid(fact.clone()).call().await.map_err(SharpError::FactRegistry)?; + self.fact_registry.isValid(fact.clone()).call().await.map_err(FactCheckerError::FactRegistry)?; Ok(_0) } } diff --git a/src/main.rs b/src/main.rs index 6d9bc3c7..0cf325ba 100644 --- a/src/main.rs +++ b/src/main.rs @@ -8,6 +8,8 @@ mod controllers; /// Contains the da clients mod da_clients; /// Contains the trait that all database clients must implement mod database; +/// Contains the interface to the proof fact registry +mod fact_checker; /// Contains the trait that all jobs must implement.
Also /// contains the root level functions for which detect the job /// type and call the corresponding job diff --git a/src/provers/error.rs b/src/provers/error.rs index d707817d..9e442709 100644 --- a/src/provers/error.rs +++ b/src/provers/error.rs @@ -1,4 +1,6 @@ -use crate::settings::SettingsProviderError; +use crate::{fact_checker::error::FactCheckerError, settings::SettingsProviderError}; + +use super::TaskId; #[derive(Debug, thiserror::Error)] pub enum ProverServiceError { @@ -7,5 +9,9 @@ pub enum ProverServiceError { #[error("Stone prover failed: {0}")] SettingsProvider(#[from] SettingsProviderError), #[error("Task is invalid: {0}")] - TaskInvalid(String), + TaskInvalid(TaskId), + #[error("Fact not found for task: {0}")] + FactNotFound(TaskId), + #[error("Fact checker error: {0}")] + FactChecker(#[from] FactCheckerError), } diff --git a/src/provers/mod.rs b/src/provers/mod.rs index d199c330..257a50d0 100644 --- a/src/provers/mod.rs +++ b/src/provers/mod.rs @@ -13,7 +13,7 @@ use error::ProverServiceError; /// inputs) /// - Register the proof onchain (individiual proof facts available for each task) /// -/// A common Madara workflow would be single task per block (SNOS execution result). +/// A common Madara workflow would be single task per block (SNOS execution result) or per block span (SNAR). 
#[async_trait] pub trait ProverService: Send + Sync { async fn submit_task(&self, task: Task) -> Result<TaskId, ProverServiceError>; diff --git a/src/provers/sharp/error.rs b/src/provers/sharp/error.rs index 9e9d64e3..742a7652 100644 --- a/src/provers/sharp/error.rs +++ b/src/provers/sharp/error.rs @@ -1,7 +1,6 @@ -use cairo_vm::program_hash::ProgramHashError; use reqwest::StatusCode; -use crate::provers::error::ProverServiceError; +use crate::{fact_checker::error::FactCheckerError, provers::error::ProverServiceError}; #[derive(Debug, thiserror::Error)] pub enum SharpError { @@ -9,16 +8,14 @@ AddJobFailure(#[source] reqwest::Error), #[error("Failed to to get status of a SHARP job: {0}")] GetJobStatusFailure(#[source] reqwest::Error), + #[error("Fact checker error: {0}")] + FactChecker(#[from] FactCheckerError), #[error("SHARP service returned an error {0}")] SharpService(StatusCode), - #[error("Fact registry call failed: {0}")] - FactRegistry(#[source] alloy::contract::Error), #[error("Failed to parse task ID: {0}")] TaskIdParse(uuid::Error), #[error("Failed to encode PIE")] PieEncode(#[source] snos::error::SnOsError), - #[error("Failed to compute program hash: {0}")] - ProgramHashCompute(#[source] ProgramHashError), } impl From<SharpError> for ProverServiceError { diff --git a/src/provers/sharp/mod.rs b/src/provers/sharp/mod.rs index 7c4be422..3425f933 100644 --- a/src/provers/sharp/mod.rs +++ b/src/provers/sharp/mod.rs @@ -1,24 +1,24 @@ pub mod config; pub mod error; -pub mod fact_checker; pub mod sharp_client; +use std::collections::HashMap; use std::str::FromStr; use alloy::primitives::B256; use async_trait::async_trait; -use cairo_vm::program_hash::compute_program_hash_chain; use cairo_vm::vm::runners::cairo_pie::CairoPie; use snos::sharp::CairoJobStatus; use uuid::Uuid; use self::config::SharpConfig; use self::error::SharpError; -use self::fact_checker::FactChecker; use self::sharp_client::SharpClient; use super::error::ProverServiceError; use super::{ProverService, Task,
TaskId, TaskStatus}; +use crate::fact_checker::fact_info::get_fact_info; +use crate::fact_checker::FactChecker; use crate::settings::SettingsProvider; pub const SHARP_SETTINGS_NAME: &str = "sharp"; @@ -27,6 +27,7 @@ pub struct SharpProverService { sharp_client: SharpClient, fact_checker: FactChecker, + task_facts: HashMap<TaskId, B256>, } #[async_trait] @@ -60,7 +61,7 @@ Ok(TaskStatus::Processing) } CairoJobStatus::ONCHAIN => { - let fact = self.get_fact(task_id); + let fact = self.get_fact(task_id).ok_or(ProverServiceError::FactNotFound(task_id.clone()))?; if self.fact_checker.is_valid(fact).await? { Ok(TaskStatus::Succeeded) } else { @@ -72,21 +73,24 @@ } impl SharpProverService { + pub fn new(sharp_client: SharpClient, fact_checker: FactChecker) -> Self { + Self { sharp_client, fact_checker, task_facts: HashMap::new() } + } + pub fn with_settings(settings: &impl SettingsProvider) -> Self { let sharp_cfg: SharpConfig = settings.get_settings(SHARP_SETTINGS_NAME).unwrap(); let sharp_client = SharpClient::new(sharp_cfg.service_url); let fact_checker = FactChecker::new(sharp_cfg.rpc_node_url, sharp_cfg.verifier_address); - Self { sharp_client, fact_checker } + Self::new(sharp_client, fact_checker) } - pub fn set_fact(&mut self, task_id: &TaskId, pie: &CairoPie) -> Result<(), SharpError> { - let program_hash = - compute_program_hash_chain(&pie.metadata.program, 1).map_err(SharpError::ProgramHashCompute)?; - // TODO: https://github.com/starkware-libs/cairo-lang/blob/efa9648f57568aad8f8a13fbf027d2de7c63c2c0/src/starkware/cairo/bootloaders/generate_fact.py#L32 + pub fn set_fact(&mut self, task_id: &TaskId, cairo_pie: &CairoPie) -> Result<(), SharpError> { + let fact_info = get_fact_info(cairo_pie, None)?; + self.task_facts.insert(task_id.clone(), fact_info.fact); Ok(()) } - pub fn get_fact(&self, task_id: &TaskId) -> B256 { - B256::ZERO + pub fn
get_fact(&self, task_id: &TaskId) -> Option<&B256> { + self.task_facts.get(task_id) } }