diff --git a/Cargo.lock b/Cargo.lock index 1d8cd8e27..0003ce39e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -59,6 +59,17 @@ dependencies = [ "term", ] +[[package]] +name = "async-trait" +version = "0.1.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "atty" version = "0.2.14" @@ -354,7 +365,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -371,7 +382,7 @@ checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -670,7 +681,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -1050,6 +1061,7 @@ dependencies = [ name = "krill" version = "0.15.0-dev" dependencies = [ + "async-trait", "backoff", "base64 0.13.1", "basic-cookies", @@ -1416,7 +1428,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -1582,9 +1594,9 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -1600,9 +1612,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -1779,10 +1791,9 @@ dependencies = [ [[package]] name = "rpki" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a05b958a41ba8c923cf14bd2ad5f1aca3f3509c8ffd147c36e094346a0290b" +version = "0.17.3-dev" dependencies = [ + "async-trait", "base64 0.21.0", "bcder", "bytes", @@ -1975,7 +1986,7 @@ checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -2173,9 +2184,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -2259,7 +2270,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] @@ -2333,7 +2344,7 @@ checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.39", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a3ca30015..61188b526 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ exclude = [ ] [dependencies] +async-trait = "0.1.74" backoff = { version = "0.3.0", 
optional = true } base64 = "^0.13" basic-cookies = { version = "^0.1", optional = true } @@ -54,7 +55,12 @@ regex = { version = "1.5.5", optional = true, default_features = false, features ] } reqwest = { version = "0.11", features = ["json"] } rpassword = { version = "^5.0", optional = true } -rpki = { version = "0.17.2", features = ["ca", "compat", "rrdp"] } +# rpki = { version = "0.17.2", features = ["ca", "compat", "rrdp"] } +rpki = { version = "0.17.3-dev", path = "../rpki-rs", features = [ + "ca", + "compat", + "rrdp", +] } # rpki = { version = "0.16.0-dev", git = "https://github.com/nLnetLabs/rpki-rs", branch = "csr-ca-repo-trailing-slash", features = [ "ca", "compat", "rrdp" ] } scrypt = { version = "^0.6", optional = true, default-features = false } serde = { version = "^1.0", features = ["derive", "rc"] } diff --git a/src/bin/krillup.rs b/src/bin/krillup.rs index 06ac76dfa..b3c4c4500 100644 --- a/src/bin/krillup.rs +++ b/src/bin/krillup.rs @@ -13,7 +13,8 @@ use krill::{ }; use url::Url; -fn main() { +#[tokio::main] +async fn main() { let matches = make_matches(); match parse_matches(matches) { @@ -36,7 +37,7 @@ fn main() { } }; - match prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager) { + match prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager).await { Err(e) => { eprintln!("*** Error Preparing Data Migration ***"); eprintln!("{}", e); @@ -62,7 +63,7 @@ fn main() { } } KrillUpMode::Migrate { config, target } => { - if let Err(e) = migrate(config, target) { + if let Err(e) = migrate(config, target).await { eprintln!("*** Error Migrating DATA ***"); eprintln!("{}", e); eprintln!(); diff --git a/src/cli/ta_client.rs b/src/cli/ta_client.rs index 3f1f2da23..7230b7e8a 100644 --- a/src/cli/ta_client.rs +++ b/src/cli/ta_client.rs @@ -876,11 +876,11 @@ impl TrustAnchorClient { let signer_manager = TrustAnchorSignerManager::create(signer_command.config)?; match signer_command.details { - SignerCommandDetails::Init(info) => signer_manager.init(info), - SignerCommandDetails::ShowInfo => signer_manager.show(), - SignerCommandDetails::ProcessRequest(request) => signer_manager.process(request), - SignerCommandDetails::ShowLastResponse => signer_manager.show_last_response(), - SignerCommandDetails::ShowExchanges => signer_manager.show_exchanges(), + SignerCommandDetails::Init(info) => signer_manager.init(info).await, + SignerCommandDetails::ShowInfo => signer_manager.show().await, + SignerCommandDetails::ProcessRequest(request) => signer_manager.process(request).await, + SignerCommandDetails::ShowLastResponse => signer_manager.show_last_response().await, + SignerCommandDetails::ShowExchanges => signer_manager.show_exchanges().await, } } } @@ -1025,8 +1025,8 @@ impl TrustAnchorSignerManager { }) } - fn init(&self, info: SignerInitInfo) -> Result { - if self.store.has(&self.ta_handle)? { + async fn init(&self, info: SignerInitInfo) -> Result { + if self.store.has(&self.ta_handle).await? 
{ Err(TaClientError::other("Trust Anchor Signer was already initialised.")) } else { let cmd = TrustAnchorSignerInitCommand::new( @@ -1043,19 +1043,19 @@ impl TrustAnchorSignerManager { &self.actor, ); - self.store.add(cmd)?; + self.store.add(cmd).await?; Ok(TrustAnchorClientApiResponse::Empty) } } - fn show(&self) -> Result { - let ta_signer = self.get_signer()?; + async fn show(&self) -> Result { + let ta_signer = self.get_signer().await?; let info = ta_signer.get_signer_info(); Ok(TrustAnchorClientApiResponse::TrustAnchorProxySignerInfo(info)) } - fn process(&self, request: TrustAnchorSignedRequest) -> Result { + async fn process(&self, request: TrustAnchorSignedRequest) -> Result { let cmd = TrustAnchorSignerCommand::make_process_request_command( &self.ta_handle, request, @@ -1063,20 +1063,21 @@ impl TrustAnchorSignerManager { self.signer.clone(), &self.actor, ); - self.store.command(cmd)?; + self.store.command(cmd).await?; - self.show_last_response() + self.show_last_response().await } - fn show_last_response(&self) -> Result { - self.get_signer()? + async fn show_last_response(&self) -> Result { + self.get_signer() + .await? .get_latest_exchange() .map(|exchange| TrustAnchorClientApiResponse::SignerResponse(exchange.response.clone())) .ok_or_else(|| TaClientError::other("No response found.")) } - fn show_exchanges(&self) -> Result { - let signer = self.get_signer()?; + async fn show_exchanges(&self) -> Result { + let signer = self.get_signer().await?; // In this context it's okay to clone the exchanges. // If we are afraid that this would become too expensive, then we will // need to rethink the model where we return data in the enum that we @@ -1089,10 +1090,11 @@ impl TrustAnchorSignerManager { Ok(TrustAnchorClientApiResponse::ProxySignerExchanges(exchanges)) } - fn get_signer(&self) -> Result, TaClientError> { - if self.store.has(&self.ta_handle)? { + async fn get_signer(&self) -> Result, TaClientError> { + if self.store.has(&self.ta_handle).await? { self.store .get_latest(&self.ta_handle) + .await .map_err(TaClientError::KrillError) } else { Err(TaClientError::other("Trust Anchor Signer is not initialised.")) diff --git a/src/commons/api/ca.rs b/src/commons/api/ca.rs index 10991d316..a06891291 100644 --- a/src/commons/api/ca.rs +++ b/src/commons/api/ca.rs @@ -2192,29 +2192,28 @@ mod test { assert_eq!(base_uri(), signed_objects_uri) } - #[test] - fn mft_uri() { - test::test_in_memory(|storage_uri| { - let signer = OpenSslSigner::build(storage_uri, "dummy", None).unwrap(); - let key_id = signer.create_key(PublicKeyFormat::Rsa).unwrap(); - let pub_key = signer.get_key_info(&key_id).unwrap(); + #[tokio::test] + async fn mft_uri() { + let storage_uri = test::mem_storage(); + let signer = OpenSslSigner::build(&storage_uri, "dummy", None).unwrap(); + let key_id = signer.create_key(PublicKeyFormat::Rsa).await.unwrap(); + let pub_key = signer.get_key_info(&key_id).await.unwrap(); - let mft_uri = info().resolve("", ObjectName::mft_for_key(&pub_key.key_identifier()).as_ref()); + let mft_uri = info().resolve("", ObjectName::mft_for_key(&pub_key.key_identifier()).as_ref()); - let mft_path = mft_uri.relative_to(&base_uri()).unwrap(); + let mft_path = mft_uri.relative_to(&base_uri()).unwrap(); - assert_eq!(44, mft_path.len()); + assert_eq!(44, mft_path.len()); - // the file name should be the hexencoded pub key info - // not repeating that here, but checking that the name - // part is validly hex encoded. 
- let name = &mft_path[..40]; - hex::decode(name).unwrap(); + // the file name should be the hexencoded pub key info + // not repeating that here, but checking that the name + // part is validly hex encoded. + let name = &mft_path[..40]; + hex::decode(name).unwrap(); - // and the extension is '.mft' - let ext = &mft_path[40..]; - assert_eq!(ext, ".mft"); - }); + // and the extension is '.mft' + let ext = &mft_path[40..]; + assert_eq!(ext, ".mft"); } #[test] diff --git a/src/commons/crypto/signing/dispatch/krillsigner.rs b/src/commons/crypto/signing/dispatch/krillsigner.rs index 105cce2e9..fc90f4f2d 100644 --- a/src/commons/crypto/signing/dispatch/krillsigner.rs +++ b/src/commons/crypto/signing/dispatch/krillsigner.rs @@ -184,82 +184,110 @@ impl KrillSigner { self.router.get_mapper() } - pub fn get_active_signers(&self) -> HashMap> { - self.router.get_active_signers() + pub async fn get_active_signers(&self) -> HashMap> { + self.router.get_active_signers().await } - pub fn create_key(&self) -> CryptoResult { + pub async fn create_key(&self) -> CryptoResult { self.router .get_default_signer() + .await .create_key(PublicKeyFormat::Rsa) + .await .map_err(crypto::Error::signer) } - pub fn import_key(&self, pem: &str) -> CryptoResult { - self.router.import_key(pem).map_err(crypto::Error::signer) + pub async fn import_key(&self, pem: &str) -> CryptoResult { + self.router.import_key(pem).await.map_err(crypto::Error::signer) } /// Creates a new self-signed (TA) IdCert - pub fn create_self_signed_id_cert(&self) -> CryptoResult { - let signer = self.router.get_default_signer(); + pub async fn create_self_signed_id_cert(&self) -> CryptoResult { + let signer = self.router.get_default_signer().await; - let key = signer.create_key(PublicKeyFormat::Rsa).map_err(crypto::Error::signer)?; + let key = signer + .create_key(PublicKeyFormat::Rsa) + .await + .map_err(crypto::Error::signer)?; let validity = Validity::new( Time::five_minutes_ago(), Time::years_from_now(ID_CERTIFICATE_VALIDITY_YEARS), ); - IdCert::new_ta(validity, &key, signer.as_ref()).map_err(crypto::Error::signer) + IdCert::new_ta(validity, &key, signer.as_ref()) + .await + .map_err(crypto::Error::signer) } - pub fn destroy_key(&self, key_id: &KeyIdentifier) -> CryptoResult<()> { + pub async fn destroy_key(&self, key_id: &KeyIdentifier) -> CryptoResult<()> { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; - signer.destroy_key(key_id).map_err(crypto::Error::key_error) + + signer.destroy_key(key_id).await.map_err(crypto::Error::key_error) } - pub fn get_key_info(&self, key_id: &KeyIdentifier) -> CryptoResult { + pub async fn get_key_info(&self, key_id: &KeyIdentifier) -> CryptoResult { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; - signer.get_key_info(key_id).map_err(crypto::Error::key_error) + signer.get_key_info(key_id).await.map_err(crypto::Error::key_error) } - pub fn random_serial(&self) -> CryptoResult { - Serial::random(self.router.get_default_signer().as_ref()).map_err(crypto::Error::signer) + pub async fn random_serial(&self) -> CryptoResult { + Serial::random(self.router.get_default_signer().await.as_ref()) + .await + .map_err(crypto::Error::signer) } - pub fn sign + ?Sized>(&self, key_id: &KeyIdentifier, data: &D) -> CryptoResult { + pub async fn sign + ?Sized + Sync>( + &self, + key_id: &KeyIdentifier, + data: &D, + ) -> CryptoResult { let signer = self .router .get_signer_for_key(key_id) + .await 
.map_err(crypto::Error::key_error)?; signer .sign(key_id, RpkiSignatureAlgorithm::default(), data) + .await .map_err(crypto::Error::signing) } - pub fn sign_one_off + ?Sized>(&self, data: &D) -> CryptoResult<(RpkiSignature, PublicKey)> { - let signer = self.router.get_one_off_signer(); + pub async fn sign_one_off + ?Sized + Sync>( + &self, + data: &D, + ) -> CryptoResult<(RpkiSignature, PublicKey)> { + let signer = self.router.get_one_off_signer().await; signer .sign_one_off(RpkiSignatureAlgorithm::default(), data) + .await .map_err(crypto::Error::signer) } - pub fn sign_csr(&self, base_repo: &RepoInfo, name_space: &str, key_id: &KeyIdentifier) -> CryptoResult { + pub async fn sign_csr( + &self, + base_repo: &RepoInfo, + name_space: &str, + key_id: &KeyIdentifier, + ) -> CryptoResult { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; - let signing_key_id = signer.get_key_info(key_id).map_err(crypto::Error::key_error)?; + let signing_key_id = signer.get_key_info(key_id).await.map_err(crypto::Error::key_error)?; let mft_file_name = ObjectName::mft_for_key(&signing_key_id.key_identifier()); // The rpki-rs library returns a signed and encoded CSR for a CA certificate. @@ -270,31 +298,38 @@ impl KrillSigner { &base_repo.resolve(name_space, mft_file_name.as_ref()), base_repo.rpki_notify(), ) + .await .map_err(crypto::Error::signing)?; // Decode the encoded CSR again to get a typed RpkiCaCsr RpkiCaCsr::decode(signed_and_encoded_csr.as_slice()).map_err(crypto::Error::signing) } - pub fn sign_cert(&self, tbs: TbsCert, key_id: &KeyIdentifier) -> CryptoResult { + pub async fn sign_cert(&self, tbs: TbsCert, key_id: &KeyIdentifier) -> CryptoResult { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; - tbs.into_cert(signer.as_ref(), key_id).map_err(crypto::Error::signing) + tbs.into_cert(signer.as_ref(), key_id) + .await + .map_err(crypto::Error::signing) } - pub fn sign_crl(&self, tbs: TbsCertList>, key_id: &KeyIdentifier) -> CryptoResult { + pub async fn sign_crl(&self, tbs: TbsCertList>, key_id: &KeyIdentifier) -> CryptoResult { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; - tbs.into_crl(signer.as_ref(), key_id).map_err(crypto::Error::signing) + tbs.into_crl(signer.as_ref(), key_id) + .await + .map_err(crypto::Error::signing) } - pub fn sign_manifest( + pub async fn sign_manifest( &self, content: ManifestContent, builder: SignedObjectBuilder, @@ -303,14 +338,16 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; content .into_manifest(builder, signer.as_ref(), key_id) + .await .map_err(crypto::Error::signing) } - pub fn sign_roa( + pub async fn sign_roa( &self, roa_builder: RoaBuilder, object_builder: SignedObjectBuilder, @@ -319,14 +356,16 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; roa_builder .finalize(object_builder, signer.as_ref(), key_id) + .await .map_err(crypto::Error::signing) } - pub fn sign_aspa( + pub async fn sign_aspa( &self, aspa_builder: AspaBuilder, object_builder: SignedObjectBuilder, @@ -335,27 +374,31 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(key_id) + .await .map_err(crypto::Error::key_error)?; aspa_builder .finalize(object_builder, signer.as_ref(), key_id) + .await .map_err(crypto::Error::signing) } - pub fn sign_rta(&self, rta_builder: &mut 
rta::RtaBuilder, ee: Cert) -> CryptoResult<()> { + pub async fn sign_rta(&self, rta_builder: &mut rta::RtaBuilder, ee: Cert) -> CryptoResult<()> { let key_id = ee.subject_key_identifier(); let signer = self .router .get_signer_for_key(&key_id) + .await .map_err(crypto::Error::key_error)?; rta_builder.push_cert(ee); rta_builder .sign(signer.as_ref(), &key_id, None, None) + .await .map_err(crypto::Error::signing) } - pub fn create_rfc6492_cms( + pub async fn create_rfc6492_cms( &self, message: provisioning::Message, signing_key: &KeyIdentifier, @@ -363,12 +406,15 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(signing_key) + .await .map_err(crypto::Error::key_error)?; - provisioning::ProvisioningCms::create(message, signing_key, signer.as_ref()).map_err(crypto::Error::signing) + provisioning::ProvisioningCms::create(message, signing_key, signer.as_ref()) + .await + .map_err(crypto::Error::signing) } - pub fn create_rfc8181_cms( + pub async fn create_rfc8181_cms( &self, message: publication::Message, signing_key: &KeyIdentifier, @@ -376,12 +422,15 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(signing_key) + .await .map_err(crypto::Error::key_error)?; - publication::PublicationCms::create(message, signing_key, signer.as_ref()).map_err(crypto::Error::signing) + publication::PublicationCms::create(message, signing_key, signer.as_ref()) + .await + .map_err(crypto::Error::signing) } - pub fn create_ta_signed_message( + pub async fn create_ta_signed_message( &self, data: Bytes, validity_days: i64, @@ -390,11 +439,14 @@ impl KrillSigner { let signer = self .router .get_signer_for_key(signing_key) + .await .map_err(crypto::Error::key_error)?; let validity = SignSupport::sign_validity_days(validity_days); - SignedMessage::create(data, validity, signing_key, signer.as_ref()).map_err(crypto::Error::signing) + SignedMessage::create(data, validity, signing_key, signer.as_ref()) + .await + .map_err(crypto::Error::signing) } } diff --git a/src/commons/crypto/signing/dispatch/signerinfo.rs b/src/commons/crypto/signing/dispatch/signerinfo.rs index c7400e4b4..f04115c16 100644 --- a/src/commons/crypto/signing/dispatch/signerinfo.rs +++ b/src/commons/crypto/signing/dispatch/signerinfo.rs @@ -265,6 +265,7 @@ pub struct SignerInfo { keys: HashMap, } +#[async_trait::async_trait] impl Aggregate for SignerInfo { type Command = SignerInfoCommand; type StorableCommandDetails = SignerInfoCommandDetails; @@ -311,7 +312,7 @@ impl Aggregate for SignerInfo { } } - fn process_command(&self, command: Self::Command) -> Result, Self::Error> { + async fn process_command(&self, command: Self::Command) -> Result, Self::Error> { Ok(match command.into_details() { SignerInfoCommandDetails::Init => { // This can't happen really.. we would never send this command @@ -347,7 +348,7 @@ impl Aggregate for SignerInfo { }) } - fn process_init_command(command: SignerInfoInitCommand) -> Result { + async fn process_init_command(command: SignerInfoInitCommand) -> Result { let details = command.into_details(); Ok(SignerInfoInitEvent { signer_name: details.signer_name, @@ -403,7 +404,7 @@ impl SignerMapper { /// `change_signer_info()`. This could be useful for example if the signer backend retains its content but is /// upgraded to a newer version, we can then update the info string in the signer store and the upgrade will be /// visible in the history of the store. 
- pub fn add_signer( + pub async fn add_signer( &self, signer_name: &str, signer_info: &str, @@ -426,66 +427,74 @@ impl SignerMapper { &actor, ); - self.store.add(cmd)?; + self.store.add(cmd).await?; Ok(signer_handle) } - pub fn _remove_signer(&self, signer_handle: &SignerHandle) -> KrillResult<()> { - self.store.drop_aggregate(signer_handle)?; + pub async fn _remove_signer(&self, signer_handle: &SignerHandle) -> KrillResult<()> { + self.store.drop_aggregate(signer_handle).await?; Ok(()) } - pub fn get_signer_name(&self, signer_handle: &SignerHandle) -> KrillResult { - Ok(self.store.get_latest(signer_handle)?.signer_name.clone()) + pub async fn get_signer_name(&self, signer_handle: &SignerHandle) -> KrillResult { + Ok(self.store.get_latest(signer_handle).await?.signer_name.clone()) } - pub fn change_signer_name(&self, signer_handle: &SignerHandle, signer_name: &str) -> KrillResult<()> { + pub async fn change_signer_name(&self, signer_handle: &SignerHandle, signer_name: &str) -> KrillResult<()> { let cmd = SignerInfoCommand::change_signer_name(signer_handle, None, signer_name); - self.store.command(cmd)?; + self.store.command(cmd).await?; Ok(()) } - pub fn get_signer_public_key(&self, signer_handle: &SignerHandle) -> KrillResult { - Ok(self.store.get_latest(signer_handle)?.signer_identity.public_key.clone()) + pub async fn get_signer_public_key(&self, signer_handle: &SignerHandle) -> KrillResult { + Ok(self + .store + .get_latest(signer_handle) + .await? + .signer_identity + .public_key + .clone()) } - pub fn get_signer_private_key_internal_id(&self, signer_handle: &SignerHandle) -> KrillResult { + pub async fn get_signer_private_key_internal_id(&self, signer_handle: &SignerHandle) -> KrillResult { Ok(self .store - .get_latest(signer_handle)? + .get_latest(signer_handle) + .await? .signer_identity .private_key_internal_id .clone()) } - pub fn change_signer_info(&self, signer_handle: &SignerHandle, signer_info: &str) -> KrillResult<()> { + pub async fn change_signer_info(&self, signer_handle: &SignerHandle, signer_info: &str) -> KrillResult<()> { let cmd = SignerInfoCommand::change_signer_info(signer_handle, None, signer_info); - self.store.command(cmd)?; + self.store.command(cmd).await?; Ok(()) } /// Record the owner of a Krill key and its corresponding signer specific internal id. - pub fn add_key( + pub async fn add_key( &self, signer_handle: &SignerHandle, key_id: &KeyIdentifier, internal_key_id: &str, ) -> KrillResult<()> { let cmd = SignerInfoCommand::add_key(signer_handle, None, key_id, internal_key_id); - self.store.command(cmd)?; + self.store.command(cmd).await?; Ok(()) } - pub fn remove_key(&self, signer_handle: &SignerHandle, key_id: &KeyIdentifier) -> KrillResult<()> { + pub async fn remove_key(&self, signer_handle: &SignerHandle, key_id: &KeyIdentifier) -> KrillResult<()> { let cmd = SignerInfoCommand::remove_key(signer_handle, None, key_id); - self.store.command(cmd)?; + self.store.command(cmd).await?; Ok(()) } /// Retrieve the signer specific internal id corresponding to the given Krill key. - pub fn get_key(&self, signer_handle: &SignerHandle, key_id: &KeyIdentifier) -> KrillResult { + pub async fn get_key(&self, signer_handle: &SignerHandle, key_id: &KeyIdentifier) -> KrillResult { self.store - .get_latest(signer_handle)? + .get_latest(signer_handle) + .await? .keys .get(key_id) .cloned() @@ -493,16 +502,16 @@ impl SignerMapper { } /// Get the complete set of known signer handles. 
- pub fn get_signer_handles(&self) -> KrillResult> { - self.store.list().map_err(Error::AggregateStoreError) + pub async fn get_signer_handles(&self) -> KrillResult> { + self.store.list().await.map_err(Error::AggregateStoreError) } /// Get the handle of the signer that possesses the given Krill key, if any. - pub fn get_signer_for_key(&self, key_id: &KeyIdentifier) -> KrillResult { + pub async fn get_signer_for_key(&self, key_id: &KeyIdentifier) -> KrillResult { // Look for the key id in the key set of each set. Not very efficient but can be improved upon later if // needed, e.g. by creating on startup and maintaining an in-memory map of KeyIdentifier to signer Handles. - for signer_handle in self.store.list()? { - let signer_info = self.store.get_latest(&signer_handle)?; + for signer_handle in self.store.list().await? { + let signer_info = self.store.get_latest(&signer_handle).await?; if signer_info.keys.contains_key(key_id) { return Ok(signer_handle); } diff --git a/src/commons/crypto/signing/dispatch/signerprovider.rs b/src/commons/crypto/signing/dispatch/signerprovider.rs index c746c6b1e..9d021e225 100644 --- a/src/commons/crypto/signing/dispatch/signerprovider.rs +++ b/src/commons/crypto/signing/dispatch/signerprovider.rs @@ -95,9 +95,9 @@ impl SignerProvider { } } - pub fn create_registration_key(&self) -> Result<(PublicKey, String), SignerError> { + pub async fn create_registration_key(&self) -> Result<(PublicKey, String), SignerError> { match self { - SignerProvider::OpenSsl(_, signer) => signer.create_registration_key(), + SignerProvider::OpenSsl(_, signer) => signer.create_registration_key().await, #[cfg(feature = "hsm")] SignerProvider::Kmip(_, signer) => signer.create_registration_key(), #[cfg(feature = "hsm")] @@ -107,13 +107,17 @@ impl SignerProvider { } } - pub fn sign_registration_challenge + ?Sized>( + pub async fn sign_registration_challenge + ?Sized>( &self, signer_private_key_id: &str, challenge: &D, ) -> Result { match self { - SignerProvider::OpenSsl(_, signer) => signer.sign_registration_challenge(signer_private_key_id, challenge), + SignerProvider::OpenSsl(_, signer) => { + signer + .sign_registration_challenge(signer_private_key_id, challenge) + .await + } #[cfg(feature = "hsm")] SignerProvider::Kmip(_, signer) => signer.sign_registration_challenge(signer_private_key_id, challenge), #[cfg(feature = "hsm")] @@ -168,9 +172,9 @@ impl SignerProvider { /// Import an existing private key. Only supported for OpenSslSigner. Other /// signers will return an error. - pub fn import_key(&self, pem: &str) -> Result { + pub async fn import_key(&self, pem: &str) -> Result { match self { - SignerProvider::OpenSsl(_, signer) => signer.import_key(pem), + SignerProvider::OpenSsl(_, signer) => signer.import_key(pem).await, #[cfg(feature = "hsm")] SignerProvider::Kmip(_, _) => Err(SignerError::other("import key not supported for KMIP signers")), #[cfg(feature = "hsm")] @@ -183,47 +187,48 @@ impl SignerProvider { // Implement the functions defined by the `Signer` trait because `SignerRouter` expects to invoke them, but as the // dispatching is not trait based we don't actually have to implement the `Signer` trait. 
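// ---------------------------------------------------------------------------
// Editor's note: the attribute added below comes from the new `async-trait`
// dependency introduced in this change. Stable Rust did not yet support
// `async fn` in traits natively, so the macro rewrites each async method to
// return a boxed `Send` future; that is also why borrowed generic arguments
// such as `&D` pick up a `Sync` bound elsewhere in this diff. A minimal,
// self-contained sketch of the pattern (the `AsyncSign` and `NullSigner`
// names are invented for illustration and are not part of this change):

#[async_trait::async_trait]
trait AsyncSign {
    async fn sign(&self, data: &[u8]) -> Vec<u8>;
}

struct NullSigner;

#[async_trait::async_trait]
impl AsyncSign for NullSigner {
    // The macro expands this to roughly:
    //   fn sign<'a>(&'a self, data: &'a [u8])
    //       -> Pin<Box<dyn Future<Output = Vec<u8>> + Send + 'a>>
    async fn sign(&self, data: &[u8]) -> Vec<u8> {
        data.to_vec()
    }
}
// ---------------------------------------------------------------------------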
+#[async_trait::async_trait] impl Signer for SignerProvider { type KeyId = KeyIdentifier; type Error = SignerError; - fn create_key(&self, algorithm: PublicKeyFormat) -> Result { + async fn create_key(&self, algorithm: PublicKeyFormat) -> Result { match self { - SignerProvider::OpenSsl(_, signer) => signer.create_key(algorithm), + SignerProvider::OpenSsl(_, signer) => signer.create_key(algorithm).await, #[cfg(feature = "hsm")] - SignerProvider::Kmip(_, signer) => signer.create_key(algorithm), + SignerProvider::Kmip(_, signer) => signer.create_key(algorithm).await, #[cfg(feature = "hsm")] - SignerProvider::Pkcs11(_, signer) => signer.create_key(algorithm), + SignerProvider::Pkcs11(_, signer) => signer.create_key(algorithm).await, #[cfg(all(test, feature = "hsm"))] - SignerProvider::Mock(_, signer) => signer.create_key(algorithm), + SignerProvider::Mock(_, signer) => signer.create_key(algorithm).await, } } - fn get_key_info(&self, key: &KeyIdentifier) -> Result> { + async fn get_key_info(&self, key: &KeyIdentifier) -> Result> { match self { - SignerProvider::OpenSsl(_, signer) => signer.get_key_info(key), + SignerProvider::OpenSsl(_, signer) => signer.get_key_info(key).await, #[cfg(feature = "hsm")] - SignerProvider::Kmip(_, signer) => signer.get_key_info(key), + SignerProvider::Kmip(_, signer) => signer.get_key_info(key).await, #[cfg(feature = "hsm")] - SignerProvider::Pkcs11(_, signer) => signer.get_key_info(key), + SignerProvider::Pkcs11(_, signer) => signer.get_key_info(key).await, #[cfg(all(test, feature = "hsm"))] - SignerProvider::Mock(_, signer) => signer.get_key_info(key), + SignerProvider::Mock(_, signer) => signer.get_key_info(key).await, } } - fn destroy_key(&self, key: &KeyIdentifier) -> Result<(), KeyError> { + async fn destroy_key(&self, key: &KeyIdentifier) -> Result<(), KeyError> { match self { - SignerProvider::OpenSsl(_, signer) => signer.destroy_key(key), + SignerProvider::OpenSsl(_, signer) => signer.destroy_key(key).await, #[cfg(feature = "hsm")] - SignerProvider::Kmip(_, signer) => signer.destroy_key(key), + SignerProvider::Kmip(_, signer) => signer.destroy_key(key).await, #[cfg(feature = "hsm")] - SignerProvider::Pkcs11(_, signer) => signer.destroy_key(key), + SignerProvider::Pkcs11(_, signer) => signer.destroy_key(key).await, #[cfg(all(test, feature = "hsm"))] - SignerProvider::Mock(_, signer) => signer.destroy_key(key), + SignerProvider::Mock(_, signer) => signer.destroy_key(key).await, } } - fn sign + ?Sized>( + async fn sign + ?Sized + Sync>( &self, key: &KeyIdentifier, algorithm: Alg, @@ -235,17 +240,17 @@ impl Signer for SignerProvider { } match self { - SignerProvider::OpenSsl(_, signer) => signer.sign(key, algorithm, data), + SignerProvider::OpenSsl(_, signer) => signer.sign(key, algorithm, data).await, #[cfg(feature = "hsm")] - SignerProvider::Kmip(_, signer) => signer.sign(key, algorithm, data), + SignerProvider::Kmip(_, signer) => signer.sign(key, algorithm, data).await, #[cfg(feature = "hsm")] - SignerProvider::Pkcs11(_, signer) => signer.sign(key, algorithm, data), + SignerProvider::Pkcs11(_, signer) => signer.sign(key, algorithm, data).await, #[cfg(all(test, feature = "hsm"))] - SignerProvider::Mock(_, signer) => signer.sign(key, algorithm, data), + SignerProvider::Mock(_, signer) => signer.sign(key, algorithm, data).await, } } - fn sign_one_off + ?Sized>( + async fn sign_one_off + ?Sized + Sync>( &self, algorithm: Alg, data: &D, @@ -266,7 +271,7 @@ impl Signer for SignerProvider { } } - fn rand(&self, target: &mut [u8]) -> Result<(), Self::Error> { + 
async fn rand(&self, target: &mut [u8]) -> Result<(), Self::Error> { openssl::rand::rand_bytes(target).map_err(SignerError::OpenSslError) } } diff --git a/src/commons/crypto/signing/dispatch/signerrouter.rs b/src/commons/crypto/signing/dispatch/signerrouter.rs index 72d3ef21a..392004c19 100644 --- a/src/commons/crypto/signing/dispatch/signerrouter.rs +++ b/src/commons/crypto/signing/dispatch/signerrouter.rs @@ -1,5 +1,8 @@ +use std::collections::HashMap; +use std::ops::Deref; use std::sync::Arc; -use std::{collections::HashMap, sync::RwLock}; + +use tokio::sync::RwLock; use rpki::crypto::KeyIdentifier; @@ -154,20 +157,20 @@ impl SignerRouter { self.signer_mapper.clone() } - pub fn get_active_signers(&self) -> HashMap> { - self.bind_ready_signers(); - self.active_signers.read().unwrap().clone() + pub async fn get_active_signers(&self) -> HashMap> { + self.bind_ready_signers().await; + self.active_signers.read().await.clone() } /// Get the default signer - pub fn get_default_signer(&self) -> &Arc { - self.bind_ready_signers(); + pub async fn get_default_signer(&self) -> &Arc { + self.bind_ready_signers().await; &self.default_signer } /// Get the one-off signer (usually OpenSSL) - pub fn get_one_off_signer(&self) -> &Arc { - self.bind_ready_signers(); + pub async fn get_one_off_signer(&self) -> &Arc { + self.bind_ready_signers().await; &self.one_off_signer } @@ -176,17 +179,18 @@ impl SignerRouter { /// If the signer that owns the key has not yet been promoted from the pending set to the active set, or if /// the key was not created by us or was not registered with the [SignerMapper], then this lookup will fail with /// [SignerError::KeyNotFound]. - pub fn get_signer_for_key(&self, key_id: &KeyIdentifier) -> Result, SignerError> { + pub async fn get_signer_for_key(&self, key_id: &KeyIdentifier) -> Result, SignerError> { match &self.signer_mapper { None => Ok(self.default_signer.clone()), Some(mapper) => { // Get the signer handle for the key let signer_handle = mapper .get_signer_for_key(key_id) + .await .map_err(|_| SignerError::KeyNotFound)?; // Get the SignerProvider for the handle, if the signer is active - let signer = self.active_signers.read().unwrap().get(&signer_handle).cloned(); + let signer = self.active_signers.read().await.get(&signer_handle).cloned(); signer.ok_or(SignerError::KeyNotFound) } @@ -204,9 +208,9 @@ impl SignerRouter { /// Import an existing private RSA key. Will only work for the OpenSslSigner. /// Returns an error if another signer is used. - pub fn import_key(&self, pem: &str) -> Result { - self.bind_ready_signers(); - self.default_signer.import_key(pem) + pub async fn import_key(&self, pem: &str) -> Result { + self.bind_ready_signers().await; + self.default_signer.import_key(pem).await } } @@ -278,119 +282,107 @@ impl SignerRouter { /// but in such cases should implement retry and backoff such that not every attempt to use the signer is blocked /// trying to connect to the backend. Instead most attempts to use a temporarily unavailable signer should fail /// very quickly because the signer handling code is "sleeping" between binding attempts. - fn bind_ready_signers(&self) { - if let Err(err) = self.do_ready_signer_binding() { + async fn bind_ready_signers(&self) { + if let Err(err) = self.do_ready_signer_binding().await { error!("Internal error: Unable to bind ready signers: {}", err); } } /// Attempt to bind pending signers.
- fn do_ready_signer_binding(&self) -> Result<(), String> { - let num_pending_signers = self.pending_signers.read().unwrap().len(); + async fn do_ready_signer_binding(&self) -> Result<(), String> { + let num_pending_signers = self.pending_signers.read().await.len(); if num_pending_signers > 0 { trace!("Attempting to bind {} pending signers", num_pending_signers); // Fetch the handle of every signer previously created in the [SignerMapper] to see if any of the pending // signers is actually one of these or is a new signer that we haven't seen before. - let candidate_handles = self.get_candidate_signer_handles()?; + let candidate_handles = self.get_candidate_signer_handles().await?; trace!("{} signers were previously registered", candidate_handles.len()); // Block until we can get a write lock on the set of pending_signers as we will hopefully remove one or // more items from the set. Standard practice in Krill is to panic if a lock cannot be obtained. - let mut pending_signers = self.pending_signers.write().unwrap(); - - let mut abort_flag = false; + let mut pending_signers = self.pending_signers.write().await; // For each pending signer see if we can verify it and if so move it from the pending set to the active set. - pending_signers.retain(|signer_provider| -> bool { - if abort_flag { - return true; - } + // - if it needs to be kept we put it in the retain_signers vec + let mut retain_signers = vec![]; + // let mut abort_flag = false; + for signer_provider in pending_signers.iter() { let signer_name = signer_provider.get_name().to_string(); - // See if this is a known signer that whose signature matches the public key stored in the - // [SignerMapper] for the signer. - self.identify_signer(signer_provider, &candidate_handles) - .and_then(|verify_result| match verify_result { - IdentifyResult::Unavailable => { - // Signer isn't ready yet, leave it in the pending set and try again next time. - trace!("Signer '{}' is unavailable", signer_name); - Ok(true) - } - IdentifyResult::Identified(signer_handle) => { - // Signer is ready and verified, add it to the active set. - self.active_signers - .write() - .unwrap() - .insert(signer_handle, signer_provider.clone()); - info!("Signer '{}' is ready for use", signer_name); - // And remove it from the pending set - Ok(false) - } - IdentifyResult::Unidentified => { - // Signer is ready and new, register it and move it to the active set - self.register_new_signer(signer_provider) - .map(|register_result| match register_result { - RegisterResult::NotReady => { - // Strange, it was ready just now when we verified it ... leave it in the - // pending set and try again next time. - trace!("Signer '{}' is not ready", signer_name); - true - } - RegisterResult::ReadyVerified(signer_handle) => { - // Signer is ready and verified, add it to the active set. 
- self.active_signers - .write() - .unwrap() - .insert(signer_handle, signer_provider.clone()); - info!("Signer '{}' is ready for use", signer_name); - // And remove it from the pending set - false - } - RegisterResult::ReadyUnusable(err) => { - // Signer registration failed, remove it from the pending set - error!( - "Signer '{}' could not be registered: signer is not usable: {}", - signer_name, err - ); - false - } - }) - } - IdentifyResult::Unusable => { - // Signer is ready and unusable, remove it from the pending set - error!("Signer '{}' could not be identified: signer is not usable", signer_name); - Ok(false) - } - IdentifyResult::Corrupt => { - // This case should never happen as this variant is handled in the called code - Err(ErrorString::new("Internal error: invalid handle")) - } - }) - .unwrap_or_else(|err| { - error!("Signer '{}' could not be bound: {}. Aborting.", signer_name, *err); - abort_flag = true; - true - }) - }); + match self.identify_signer(signer_provider, &candidate_handles).await? { + IdentifyResult::Unavailable => { + // Signer isn't ready yet, leave it in the pending set and try again next time. + trace!("Signer '{}' is unavailable", signer_name); + retain_signers.push(signer_provider.clone()); + } + IdentifyResult::Identified(signer_handle) => { + // Signer is ready and verified, add it to the active set. + self.active_signers + .write() + .await + .insert(signer_handle, signer_provider.clone()); + info!("Signer '{}' is ready for use", signer_name); + } + IdentifyResult::Unidentified => { + // Signer is ready and new, register it and move it to the active set + match self.register_new_signer(signer_provider).await { + Ok(RegisterResult::NotReady) => { + // Strange, it was ready just now when we verified it ... leave it in the + // pending set and try again next time. + trace!("Signer '{}' is not ready", signer_name); + retain_signers.push(signer_provider.clone()); + } + Ok(RegisterResult::ReadyVerified(signer_handle)) => { + // Signer is ready and verified, add it to the active set. + self.active_signers + .write() + .await + .insert(signer_handle, signer_provider.clone()); + info!("Signer '{}' is ready for use", signer_name); + } + Ok(RegisterResult::ReadyUnusable(e)) => { + // Signer registration failed, remove it from the pending set + error!("Signer '{signer_name}' could not be registered: {e}"); + } + Err(e) => { + // Signer registration failed, remove it from the pending set + error!("Signer '{signer_name}' could not be registered: {}", e.deref()); + } + }; + } + IdentifyResult::Unusable => { + // Signer is ready and unusable, remove it from the pending set + error!("Signer '{}' could not be identified: signer is not usable", signer_name); + } + IdentifyResult::Corrupt => { + // This case should never happen as this variant is handled in the called code + error!("Internal error: invalid handle"); + } + } + } + + pending_signers.clear(); + pending_signers.append(&mut retain_signers); } Ok(()) } /// Retrieves the set of signer handles known to the signer mapper. - fn get_candidate_signer_handles(&self) -> Result, String> { + async fn get_candidate_signer_handles(&self) -> Result, String> { // TODO: Filter out already bound signers? self.signer_mapper .as_ref() .unwrap() .get_signer_handles() + .await .map_err(|err| format!("Failed to get signer handles: {}", err)) } /// Checks if the signer identity can be shown to match one of the known signer public keys. 
- fn identify_signer( + async fn identify_signer( &self, signer_provider: &Arc, candidate_handles: &[SignerHandle], @@ -403,7 +395,13 @@ impl SignerRouter { // candidate handles. let mut ordered_candidate_handles = Vec::new(); for candidate_handle in candidate_handles { - let stored_signer_name = self.signer_mapper.as_ref().unwrap().get_signer_name(candidate_handle)?; + let stored_signer_name = self + .signer_mapper + .as_ref() + .unwrap() + .get_signer_name(candidate_handle) + .await?; + if stored_signer_name == config_signer_name { ordered_candidate_handles.insert(0, candidate_handle); } else { @@ -412,7 +410,9 @@ impl SignerRouter { } for candidate_handle in ordered_candidate_handles { - let res = self.is_signer_identified_by_handle(signer_provider, candidate_handle)?; + let res = self + .is_signer_identified_by_handle(signer_provider, candidate_handle) + .await?; match res { IdentifyResult::Unidentified => { // Signer was contacted and no errors were encountered but it doesn't know the key encoded in the @@ -441,12 +441,18 @@ impl SignerRouter { /// To match the signer backend must have access to a key whose signer internal key ID matches one we stored when /// the signer was previously registered, and when used to sign a challenge the signature must match the public /// key we have on record (also stored when the signer was previously registered). - fn is_signer_identified_by_handle( + async fn is_signer_identified_by_handle( &self, signer_provider: &Arc, candidate_handle: &SignerHandle, ) -> Result { - let handle_name = self.signer_mapper.as_ref().unwrap().get_signer_name(candidate_handle)?; + let handle_name = self + .signer_mapper + .as_ref() + .unwrap() + .get_signer_name(candidate_handle) + .await?; + let signer_name = signer_provider.get_name().to_string(); trace!( "Attempting to identify signer '{}' using identity key stored for signer '{}'", @@ -459,6 +465,7 @@ impl SignerRouter { .as_ref() .unwrap() .get_signer_public_key(candidate_handle) + .await { Ok(res) => Ok(res), Err(err) => match err { @@ -477,10 +484,14 @@ impl SignerRouter { .signer_mapper .as_ref() .unwrap() - .get_signer_private_key_internal_id(candidate_handle)?; + .get_signer_private_key_internal_id(candidate_handle) + .await?; let challenge = "Krill signer verification challenge".as_bytes(); - let signature = match signer_provider.sign_registration_challenge(&signer_private_key_id, challenge) { + let signature = match signer_provider + .sign_registration_challenge(&signer_private_key_id, challenge) + .await + { Err(SignerError::TemporarilyUnavailable) => { debug!("Signer '{}' could not be contacted", signer_name); return Ok(IdentifyResult::Unavailable); @@ -512,6 +523,7 @@ impl SignerRouter { .as_ref() .unwrap() .change_signer_name(candidate_handle, &signer_name) + .await { // This is unexpected and perhaps indicative of a deeper problem but log and keep going. error!( @@ -524,6 +536,7 @@ impl SignerRouter { .as_ref() .unwrap() .change_signer_info(candidate_handle, &signer_info) + .await { // This is unexpected and perhaps indicative of a deeper problem but log and keep going. error!( @@ -551,19 +564,22 @@ impl SignerRouter { /// Registration creates a key pair in the signer backend and stores the signer specific internal ID of the created /// private key and the content of the created public key. Registration also verifies that the signer is able to /// sign using the newly created private key such that the created signature matches the created public key. 
- fn register_new_signer(&self, signer_provider: &Arc) -> Result { + async fn register_new_signer(&self, signer_provider: &Arc) -> Result { let signer_name = signer_provider.get_name().to_string(); trace!("Attempting to register signer '{}'", signer_name); - let (public_key, signer_private_key_id) = match signer_provider.create_registration_key() { + let (public_key, signer_private_key_id) = match signer_provider.create_registration_key().await { Err(SignerError::TemporarilyUnavailable) => return Ok(RegisterResult::NotReady), Err(err) => return Ok(RegisterResult::ReadyUnusable(err.to_string())), Ok(res) => res, }; let challenge = "Krill signer verification challenge".as_bytes(); - let signature = match signer_provider.sign_registration_challenge(&signer_private_key_id, challenge) { + let signature = match signer_provider + .sign_registration_challenge(&signer_private_key_id, challenge) + .await + { Err(SignerError::TemporarilyUnavailable) => return Ok(RegisterResult::NotReady), Err(err) => return Ok(RegisterResult::ReadyUnusable(err.to_string())), Ok(res) => res, @@ -581,12 +597,12 @@ impl SignerRouter { .get_info() .unwrap_or_else(|| "No signer info".to_string()); - let signer_handle = self.signer_mapper.as_ref().unwrap().add_signer( - &signer_name, - &signer_info, - &public_key, - &signer_private_key_id, - )?; + let signer_handle = self + .signer_mapper + .as_ref() + .unwrap() + .add_signer(&signer_name, &signer_info, &public_key, &signer_private_key_id) + .await?; signer_provider.set_handle(signer_handle.clone()); @@ -624,147 +640,166 @@ pub mod tests { } } - #[test] - pub fn verify_that_a_usable_signer_is_registered_and_can_be_used() { - test::test_in_memory(|storage_uri| { - #[allow(non_snake_case)] - let DEF_SIG_ALG = RpkiSignatureAlgorithm::default(); - - // Build a mock signer that is contactable and usable for the SignerRouter - let call_counts = Arc::new(MockSignerCallCounts::new()); - let signer_mapper = Arc::new(SignerMapper::build(storage_uri).unwrap()); - let mock_signer = MockSigner::new("mock signer", signer_mapper.clone(), call_counts.clone(), None, None); - let mock_signer = Arc::new(SignerProvider::Mock(SignerFlags::default(), mock_signer)); - - // Create a SignerRouter that uses the mock signer with the mock signer starting in the pending signer set. - let router = create_signer_router(&[mock_signer.clone()], signer_mapper.clone()); - - // No signers have been registered with the SignerMapper yet - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - - // Verify that initially none of the functions in the mock signer have been called - assert_eq!(0, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(0, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(0, call_counts.get(FnIdx::GetInfo)); - assert_eq!(0, call_counts.get(FnIdx::SetHandle)); - assert_eq!(0, call_counts.get(FnIdx::CreateKey)); - assert_eq!(0, call_counts.get(FnIdx::Sign)); - assert_eq!(0, call_counts.get(FnIdx::DestroyKey)); - - // Try to use the SignerRouter to bind ready signers. This should cause the SignerRouter to contact - // the mock signer, ask it to create a registration key, verify that it can sign correctly with that key, - // assign a signer mapper handle to the signer, then check for random number generation support and finally - // actually generate the random number. 
- router.bind_ready_signers(); - assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(1, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(1, call_counts.get(FnIdx::GetInfo)); - assert_eq!(1, call_counts.get(FnIdx::SetHandle)); - - // One signer has been registered with the SignerMapper now - assert_eq!(1, signer_mapper.get_signer_handles().unwrap().len()); - - // Ask to bind the signers again. This time none of the registration steps should be performed as the signer - // is already registered and active. - router.bind_ready_signers(); - - // Check that we can create a new key with the mock signer via the SignerRouter and that the key gets - // registered with the signer mapper. - let key_identifier = router - .get_default_signer() - .create_key(rpki::crypto::PublicKeyFormat::Rsa) - .unwrap(); - assert!(signer_mapper.get_signer_for_key(&key_identifier).is_ok()); - assert_eq!(1, call_counts.get(FnIdx::CreateKey)); - - // Check that we can sign with the SignerRouter using the Krill key identifier. The SignerRouter should - // discover from the SignerMapper that the key belongs to the mock signer and so dispatch the signing - // request to the mock signer. - let random_data = test::random_bytes(); - - router - .get_default_signer() - .sign(&key_identifier, DEF_SIG_ALG, &random_data) - .unwrap(); - assert_eq!(1, call_counts.get(FnIdx::Sign)); - - // Throw the SignerRouter away and create a new one. This is like restarting Krill. Keep the mock signer as - // otherwise we will lose its in-memory private key store. Keep the SignerMapper as the mock signer is - // using it, and because destroying it and recreating it would just be like forcing it to re-read it's saved - // state from disk (and we're not trying to test the AggregateStore here anyway!). - let router = create_signer_router(&[mock_signer.clone()], signer_mapper.clone()); - - // Try to use the SignerRouter to sign again. This time around the SignerMapper should find the existing - // signer in its records and only ask the signer to sign the registration challenge, but not ask it to - // create a registration key. - router - .get_default_signer() - .sign(&key_identifier, DEF_SIG_ALG, &random_data) - .unwrap(); - assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(2, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(2, call_counts.get(FnIdx::GetInfo)); - assert_eq!(2, call_counts.get(FnIdx::SetHandle)); - assert_eq!(2, call_counts.get(FnIdx::Sign)); - - // Now delete the key and verify that we no longer have it. - router.get_default_signer().destroy_key(&key_identifier).unwrap(); - assert_eq!(1, call_counts.get(FnIdx::DestroyKey)); - - let err = router.get_default_signer().get_key_info(&key_identifier); - assert!(matches!(err, Err(KeyError::Signer(SignerError::KeyNotFound)))); - - // The Sign call count is still 2 because the SignerRouter fails to determine which signer owns the key - // and fails. - assert_eq!(2, call_counts.get(FnIdx::Sign)); - - // Now ask the mock signer to forget its registration key. After this the SignerRouter should fail to - // verify it and require it to register anew. - mock_signer.wipe_all_keys(); - - // The mock signer still works for the moment because the SignerRouter doesn't do registration again as - // it thinks it still has an active signer. 
- let key_identifier = router.get_default_signer().create_key(PublicKeyFormat::Rsa).unwrap(); - router - .get_default_signer() - .sign(&key_identifier, DEF_SIG_ALG, &random_data) - .unwrap(); - - assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(2, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(2, call_counts.get(FnIdx::CreateKey)); - assert_eq!(3, call_counts.get(FnIdx::Sign)); - - // Throw away the SignerRouter again, thereby forcing the mock signer to be in the pending set again - // instead of the ready set. Now the SignerRouter should register the mock signer again and we should end - // up with a second signer in the SignerMapper as the ability to identify the first one has been lost - // (because above we instructed the mock signer to wipe all its keys). As the SignerMapper contains an - // existing signer the call count to sign_registration_challenge() in the mock signer will actually - // increase twice because the SignerRouter will first challenge it to prove that it is the already - // known signer. Without the identity key however the mock signer fails this identity check and is - // registered again (and then sign challenged again, hence the double increment). - let router = create_signer_router(&[mock_signer], signer_mapper.clone()); - - let err = router.get_default_signer().get_key_info(&key_identifier); - assert!(matches!(err, Err(KeyError::Signer(SignerError::KeyNotFound)))); - - assert_eq!(2, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(4, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(3, call_counts.get(FnIdx::GetInfo)); - assert_eq!(3, call_counts.get(FnIdx::SetHandle)); - assert_eq!(3, call_counts.get(FnIdx::Sign)); - - // Two signers have been registered with the SignerMapper by this point, one of which is now orphaned as - // the keys that it knows about refer to a signer backend that is no longer able to prove that it is the - // owner of these keys (because its identity key was deleted in the signer backend). Thus the SignerRouter - // doesn't know which signer to forward requests to in order to work with the keys owned by the orphaned - // signer. - assert_eq!(2, signer_mapper.get_signer_handles().unwrap().len()); - }); + #[tokio::test] + pub async fn verify_that_a_usable_signer_is_registered_and_can_be_used() { + let storage_uri = test::mem_storage(); + + #[allow(non_snake_case)] + let DEF_SIG_ALG = RpkiSignatureAlgorithm::default(); + + // Build a mock signer that is contactable and usable for the SignerRouter + let call_counts = Arc::new(MockSignerCallCounts::new()); + let signer_mapper = Arc::new(SignerMapper::build(&storage_uri).unwrap()); + let mock_signer = MockSigner::new("mock signer", signer_mapper.clone(), call_counts.clone(), None, None); + let mock_signer = Arc::new(SignerProvider::Mock(SignerFlags::default(), mock_signer)); + + // Create a SignerRouter that uses the mock signer with the mock signer starting in the pending signer set. 
+ let router = create_signer_router(&[mock_signer.clone()], signer_mapper.clone()); + + // No signers have been registered with the SignerMapper yet + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); + + // Verify that initially none of the functions in the mock signer have been called + assert_eq!(0, call_counts.get(FnIdx::CreateRegistrationKey)); + assert_eq!(0, call_counts.get(FnIdx::SignRegistrationChallenge)); + assert_eq!(0, call_counts.get(FnIdx::GetInfo)); + assert_eq!(0, call_counts.get(FnIdx::SetHandle)); + assert_eq!(0, call_counts.get(FnIdx::CreateKey)); + assert_eq!(0, call_counts.get(FnIdx::Sign)); + assert_eq!(0, call_counts.get(FnIdx::DestroyKey)); + + // Try to use the SignerRouter to bind ready signers. This should cause the SignerRouter to contact + // the mock signer, ask it to create a registration key, verify that it can sign correctly with that key, + // assign a signer mapper handle to the signer, then check for random number generation support and finally + // actually generate the random number. + router.bind_ready_signers().await; + assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); + assert_eq!(1, call_counts.get(FnIdx::SignRegistrationChallenge)); + assert_eq!(1, call_counts.get(FnIdx::GetInfo)); + assert_eq!(1, call_counts.get(FnIdx::SetHandle)); + + // One signer has been registered with the SignerMapper now + assert_eq!(1, signer_mapper.get_signer_handles().await.unwrap().len()); + + // Ask to bind the signers again. This time none of the registration steps should be performed as the signer + // is already registered and active. + router.bind_ready_signers().await; + + // Check that we can create a new key with the mock signer via the SignerRouter and that the key gets + // registered with the signer mapper. + let key_identifier = router + .get_default_signer() + .await + .create_key(rpki::crypto::PublicKeyFormat::Rsa) + .await + .unwrap(); + assert!(signer_mapper.get_signer_for_key(&key_identifier).await.is_ok()); + assert_eq!(1, call_counts.get(FnIdx::CreateKey)); + + // Check that we can sign with the SignerRouter using the Krill key identifier. The SignerRouter should + // discover from the SignerMapper that the key belongs to the mock signer and so dispatch the signing + // request to the mock signer. + let random_data = test::random_bytes(); + + router + .get_default_signer() + .await + .sign(&key_identifier, DEF_SIG_ALG, &random_data) + .await + .unwrap(); + assert_eq!(1, call_counts.get(FnIdx::Sign)); + + // Throw the SignerRouter away and create a new one. This is like restarting Krill. Keep the mock signer as + // otherwise we will lose its in-memory private key store. Keep the SignerMapper as the mock signer is + // using it, and because destroying it and recreating it would just be like forcing it to re-read its saved + // state from disk (and we're not trying to test the AggregateStore here anyway!). + let router = create_signer_router(&[mock_signer.clone()], signer_mapper.clone()); + + // Try to use the SignerRouter to sign again. This time around the SignerMapper should find the existing + // signer in its records and only ask the signer to sign the registration challenge, but not ask it to + // create a registration key.
+        router
+            .get_default_signer()
+            .await
+            .sign(&key_identifier, DEF_SIG_ALG, &random_data)
+            .await
+            .unwrap();
+        assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey));
+        assert_eq!(2, call_counts.get(FnIdx::SignRegistrationChallenge));
+        assert_eq!(2, call_counts.get(FnIdx::GetInfo));
+        assert_eq!(2, call_counts.get(FnIdx::SetHandle));
+        assert_eq!(2, call_counts.get(FnIdx::Sign));
+
+        // Now delete the key and verify that we no longer have it.
+        router
+            .get_default_signer()
+            .await
+            .destroy_key(&key_identifier)
+            .await
+            .unwrap();
+
+        assert_eq!(1, call_counts.get(FnIdx::DestroyKey));
+
+        let err = router.get_default_signer().await.get_key_info(&key_identifier).await;
+        assert!(matches!(err, Err(KeyError::Signer(SignerError::KeyNotFound))));
+
+        // The Sign call count is still 2: with the key gone, the SignerRouter can no longer determine
+        // which signer owns it, so the request fails before ever reaching the mock signer.
+        assert_eq!(2, call_counts.get(FnIdx::Sign));
+
+        // Now ask the mock signer to forget its registration key. After this the SignerRouter should fail to
+        // verify it and require it to register anew.
+        mock_signer.wipe_all_keys();
+
+        // The mock signer still works for the moment because the SignerRouter doesn't do registration again as
+        // it thinks it still has an active signer.
+        let key_identifier = router
+            .get_default_signer()
+            .await
+            .create_key(PublicKeyFormat::Rsa)
+            .await
+            .unwrap();
+        router
+            .get_default_signer()
+            .await
+            .sign(&key_identifier, DEF_SIG_ALG, &random_data)
+            .await
+            .unwrap();
+
+        assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey));
+        assert_eq!(2, call_counts.get(FnIdx::SignRegistrationChallenge));
+        assert_eq!(2, call_counts.get(FnIdx::CreateKey));
+        assert_eq!(3, call_counts.get(FnIdx::Sign));
+
+        // Throw away the SignerRouter again, thereby forcing the mock signer to be in the pending set again
+        // instead of the ready set. Now the SignerRouter should register the mock signer again and we should end
+        // up with a second signer in the SignerMapper as the ability to identify the first one has been lost
+        // (because above we instructed the mock signer to wipe all its keys). As the SignerMapper contains an
+        // existing signer the call count to sign_registration_challenge() in the mock signer will actually
+        // increase twice because the SignerRouter will first challenge it to prove that it is the already
+        // known signer. Without the identity key however the mock signer fails this identity check and is
+        // registered again (and then sign challenged again, hence the double increment).
+        let router = create_signer_router(&[mock_signer], signer_mapper.clone());
+
+        let err = router.get_default_signer().await.get_key_info(&key_identifier).await;
+        assert!(matches!(err, Err(KeyError::Signer(SignerError::KeyNotFound))));
+
+        assert_eq!(2, call_counts.get(FnIdx::CreateRegistrationKey));
+        assert_eq!(4, call_counts.get(FnIdx::SignRegistrationChallenge));
+        assert_eq!(3, call_counts.get(FnIdx::GetInfo));
+        assert_eq!(3, call_counts.get(FnIdx::SetHandle));
+        assert_eq!(3, call_counts.get(FnIdx::Sign));
+
+        // Two signers have been registered with the SignerMapper by this point, one of which is now orphaned as
+        // the keys that it knows about refer to a signer backend that is no longer able to prove that it is the
+        // owner of these keys (because its identity key was deleted in the signer backend). Thus the SignerRouter
+        // doesn't know which signer to forward requests to in order to work with the keys owned by the orphaned
+        // signer.
+ assert_eq!(2, signer_mapper.get_signer_handles().await.unwrap().len()); } - #[test] - pub fn verify_that_unusable_signers_are_neither_registered_nor_retried() { + #[tokio::test] + pub async fn verify_that_unusable_signers_are_neither_registered_nor_retried() { fn perm_unusable(_: &MockSignerCallCounts) -> Result<(), SignerError> { Err(SignerError::PermanentlyUnusable) } @@ -806,49 +841,48 @@ pub mod tests { ] } - test::test_in_memory(|storage_uri| { - let call_counts = Arc::new(MockSignerCallCounts::new()); - let signer_mapper = Arc::new(SignerMapper::build(storage_uri).unwrap()); - let broken_signers = create_broken_signers(signer_mapper.clone(), call_counts.clone()); - - // Create a SignerRouter that has access to all of the broken signers - let router = create_signer_router(broken_signers.as_slice(), signer_mapper.clone()); - - // No signers have been registered with the SignerMapper yet - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - - // Try to use the SignerRouter to bind the ready signers. This should cause the SignerRouter to contact - // all of the mock signers, asking them to create a registration key, and if that succeeds to then verify - // that the signer can sign correctly with that key. None of the broken signers will succeed at these steps - // and so the counter of registered signers will remain at zero. - router.bind_ready_signers(); - - // The number of attempts to register a signer should have increased by the number of signers. - // Half of the signers should fail at the registration step, the other half at the challenge signing step. - // So the number of signers that we succeeded in moving out of the pending set to the active set and - // registering with the signer mapper should be zero. - assert_eq!(6, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(3, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - - // - // Try again. - // - router.bind_ready_signers(); - - // The signers that were permanently unusable at registration should not be tried again. - assert_eq!(6 + 2, call_counts.get(FnIdx::CreateRegistrationKey)); - - // The signers that were permanently unusable at challenge signing should not be tried again. - assert_eq!(3 + 1, call_counts.get(FnIdx::SignRegistrationChallenge)); - - // And the end result should be that no signers were registered with the signer mapper. - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - }); + let storage_uri = test::mem_storage(); + let call_counts = Arc::new(MockSignerCallCounts::new()); + let signer_mapper = Arc::new(SignerMapper::build(&storage_uri).unwrap()); + let broken_signers = create_broken_signers(signer_mapper.clone(), call_counts.clone()); + + // Create a SignerRouter that has access to all of the broken signers + let router = create_signer_router(broken_signers.as_slice(), signer_mapper.clone()); + + // No signers have been registered with the SignerMapper yet + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); + + // Try to use the SignerRouter to bind the ready signers. This should cause the SignerRouter to contact + // all of the mock signers, asking them to create a registration key, and if that succeeds to then verify + // that the signer can sign correctly with that key. None of the broken signers will succeed at these steps + // and so the counter of registered signers will remain at zero. 
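+        // (All six broken signers attempt key creation; only the three that get past it reach the
+        // challenge signing step, and none complete registration.)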
+ router.bind_ready_signers().await; + + // The number of attempts to register a signer should have increased by the number of signers. + // Half of the signers should fail at the registration step, the other half at the challenge signing step. + // So the number of signers that we succeeded in moving out of the pending set to the active set and + // registering with the signer mapper should be zero. + assert_eq!(6, call_counts.get(FnIdx::CreateRegistrationKey)); + assert_eq!(3, call_counts.get(FnIdx::SignRegistrationChallenge)); + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); + + // + // Try again. + // + router.bind_ready_signers().await; + + // The signers that were permanently unusable at registration should not be tried again. + assert_eq!(6 + 2, call_counts.get(FnIdx::CreateRegistrationKey)); + + // The signers that were permanently unusable at challenge signing should not be tried again. + assert_eq!(3 + 1, call_counts.get(FnIdx::SignRegistrationChallenge)); + + // And the end result should be that no signers were registered with the signer mapper. + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); } - #[test] - pub fn verify_that_temporarily_unavailable_signers_are_registered_when_available() { + #[tokio::test] + pub async fn verify_that_temporarily_unavailable_signers_are_registered_when_available() { fn temp_unavail(call_counts: &MockSignerCallCounts) -> Result<(), SignerError> { if call_counts.get(FnIdx::CreateRegistrationKey) == 1 { // Fail the first time registration is attempted @@ -859,49 +893,49 @@ pub mod tests { } } - test::test_in_memory(|storage_uri| { - let call_counts = Arc::new(MockSignerCallCounts::new()); - let signer_mapper = Arc::new(SignerMapper::build(storage_uri).unwrap()); - - let temp_unavail_signer = Arc::new(SignerProvider::Mock( - SignerFlags::default(), - MockSigner::new( - "mock temporararily unavailable signer", - signer_mapper.clone(), - call_counts.clone(), - Some(temp_unavail), - None, - ), - )); - - // Create a SignerRouter that uses the mock signer with the mock signer starting in the pending signer set. - let router = create_signer_router(&[temp_unavail_signer], signer_mapper.clone()); - - // No signers have been registered with the SignerMapper yet - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - - // Try to use the SignerRouter to bind ready signers. This should cause the SignerRouter to contact - // the mock signer, ask it to create a registration key, verify that it can sign correctly with that key, - // assign a signer mapper handle to the signer, then check for random number generation support and finally - // actually generate the random number. This should fail the first time due to the logic imlpemented by the - // temp_avail() function above. - router.bind_ready_signers(); - - // The number of attempts to register a signer should have increased by one. - assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(0, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(0, signer_mapper.get_signer_handles().unwrap().len()); - - // - // Try again. We should succeed the second time due to the logic implemented by the temp_avail() function - // above. 
- // - router.bind_ready_signers(); - - // We should be all green now - assert_eq!(2, call_counts.get(FnIdx::CreateRegistrationKey)); - assert_eq!(1, call_counts.get(FnIdx::SignRegistrationChallenge)); - assert_eq!(1, signer_mapper.get_signer_handles().unwrap().len()); - }); + let storage_uri = test::mem_storage(); + + let call_counts = Arc::new(MockSignerCallCounts::new()); + let signer_mapper = Arc::new(SignerMapper::build(&storage_uri).unwrap()); + + let temp_unavail_signer = Arc::new(SignerProvider::Mock( + SignerFlags::default(), + MockSigner::new( + "mock temporarily unavailable signer", + signer_mapper.clone(), + call_counts.clone(), + Some(temp_unavail), + None, + ), + )); + + // Create a SignerRouter that uses the mock signer with the mock signer starting in the pending signer set. + let router = create_signer_router(&[temp_unavail_signer], signer_mapper.clone()); + + // No signers have been registered with the SignerMapper yet + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); + + // Try to use the SignerRouter to bind ready signers. This should cause the SignerRouter to contact + // the mock signer, ask it to create a registration key, verify that it can sign correctly with that key, + // assign a signer mapper handle to the signer, then check for random number generation support and finally + // actually generate the random number. This should fail the first time due to the logic implemented by the + // temp_avail() function above. + router.bind_ready_signers().await; + + // The number of attempts to register a signer should have increased by one. + assert_eq!(1, call_counts.get(FnIdx::CreateRegistrationKey)); + assert_eq!(0, call_counts.get(FnIdx::SignRegistrationChallenge)); + assert_eq!(0, signer_mapper.get_signer_handles().await.unwrap().len()); + + // + // Try again. We should succeed the second time due to the logic implemented by the temp_avail() function + // above. + // + router.bind_ready_signers().await; + + // We should be all green now + assert_eq!(2, call_counts.get(FnIdx::CreateRegistrationKey)); + assert_eq!(1, call_counts.get(FnIdx::SignRegistrationChallenge)); + assert_eq!(1, signer_mapper.get_signer_handles().await.unwrap().len()); } } diff --git a/src/commons/crypto/signing/misc.rs b/src/commons/crypto/signing/misc.rs index 17f63048b..b3daed47b 100644 --- a/src/commons/crypto/signing/misc.rs +++ b/src/commons/crypto/signing/misc.rs @@ -113,7 +113,7 @@ pub struct SignSupport; impl SignSupport { /// Create an IssuedCert - pub fn make_issued_cert( + pub async fn make_issued_cert( csr: CsrInfo, resources: &ResourceSet, limit: RequestResourceLimit, @@ -128,8 +128,8 @@ impl SignSupport { let request = CertRequest::Ca(csr, validity); - let tbs = Self::make_tbs_cert(&resources, signing_cert, request, signer)?; - let cert = signer.sign_cert(tbs, &signing_cert.key_identifier())?; + let tbs = Self::make_tbs_cert(&resources, signing_cert, request, signer).await?; + let cert = signer.sign_cert(tbs, &signing_cert.key_identifier()).await?; let uri = signing_cert.uri_for_object(&cert); @@ -146,7 +146,7 @@ impl SignSupport { /// Create an EE certificate for use in ResourceTaggedAttestations. /// Note that for RPKI signed objects such as ROAs and Manifests, the /// EE certificate is created by the rpki.rs library instead. 
- pub fn make_rta_ee_cert( + pub async fn make_rta_ee_cert( resources: &ResourceSet, signing_key: &CertifiedKey, validity: Validity, @@ -155,19 +155,19 @@ impl SignSupport { ) -> KrillResult { let signing_cert = signing_key.incoming_cert(); let request = CertRequest::Ee(pub_key, validity); - let tbs = Self::make_tbs_cert(resources, signing_cert, request, signer)?; + let tbs = Self::make_tbs_cert(resources, signing_cert, request, signer).await?; - let cert = signer.sign_cert(tbs, signing_key.key_id())?; + let cert = signer.sign_cert(tbs, signing_key.key_id()).await?; Ok(cert) } - fn make_tbs_cert( + async fn make_tbs_cert( resources: &ResourceSet, signing_cert: &ReceivedCert, request: CertRequest, signer: &KrillSigner, ) -> KrillResult { - let serial = signer.random_serial()?; + let serial = signer.random_serial().await?; let issuer = signing_cert.subject().clone(); let validity = match &request { diff --git a/src/commons/crypto/signing/signers/kmip/signer.rs b/src/commons/crypto/signing/signers/kmip/signer.rs index 19312812d..7949305f7 100644 --- a/src/commons/crypto/signing/signers/kmip/signer.rs +++ b/src/commons/crypto/signing/signers/kmip/signer.rs @@ -573,7 +573,7 @@ pub(super) struct KmipKeyPairIds { impl KmipSigner { /// Remember that the given KMIP public and private key pair IDs correspond to the given KeyIdentifier. - pub(super) fn remember_kmip_key_ids( + pub(super) async fn remember_kmip_key_ids( &self, key_id: &KeyIdentifier, kmip_key_ids: KmipKeyPairIds, @@ -581,25 +581,34 @@ impl KmipSigner { // TODO: Don't assume colons cannot appear in HSM key ids. let internal_key_id = format!("{}:{}", kmip_key_ids.public_key_id, kmip_key_ids.private_key_id); - let readable_handle = self.handle.read().unwrap(); - let signer_handle = readable_handle.as_ref().ok_or_else(|| { - SignerError::Other("KMIP: Failed to record signer key: Signer handle not set".to_string()) - })?; + let signer_handle = { + // Note: Perhaps we should change the implementation to use a tokio::sync::RwLock for + // the handle instead. Or we should not keep these handles here? However, for the + // moment that change opens a rabbit hole that is too deep. + self.handle.read().unwrap().clone().ok_or_else(|| { + SignerError::Other("KMIP: Failed to record signer key: Signer handle not set".to_string()) + })? + }; + self.mapper - .add_key(signer_handle, key_id, &internal_key_id) + .add_key(&signer_handle, key_id, &internal_key_id) + .await .map_err(|err| SignerError::KmipError(format!("Failed to record signer key: {}", err)))?; Ok(()) } /// Given a KeyIdentifier lookup the corresponding KMIP public and private key pair IDs. - pub(super) fn lookup_kmip_key_ids(&self, key_id: &KeyIdentifier) -> Result> { - let readable_handle = self.handle.read().unwrap(); - let signer_handle = readable_handle.as_ref().ok_or(KeyError::KeyNotFound)?; + pub(super) async fn lookup_kmip_key_ids( + &self, + key_id: &KeyIdentifier, + ) -> Result> { + let signer_handle = { self.handle.read().unwrap().clone().ok_or(KeyError::KeyNotFound)? }; let internal_key_id = self .mapper - .get_key(signer_handle, key_id) + .get_key(&signer_handle, key_id) + .await .map_err(|_| KeyError::KeyNotFound)?; let (public_key_id, private_key_id) = internal_key_id.split_once(':').ok_or(KeyError::KeyNotFound)?; @@ -843,21 +852,21 @@ impl KmipSigner { // dispatching is not trait based we don't actually have to implement the `Signer` trait. 
 impl KmipSigner {
-    pub fn create_key(&self, algorithm: PublicKeyFormat) -> Result<KeyIdentifier, SignerError> {
+    pub async fn create_key(&self, algorithm: PublicKeyFormat) -> Result<KeyIdentifier, SignerError> {
         let (key, kmip_key_pair_ids) = self.build_key(algorithm)?;
         let key_id = key.key_identifier();
-        self.remember_kmip_key_ids(&key_id, kmip_key_pair_ids)?;
+        self.remember_kmip_key_ids(&key_id, kmip_key_pair_ids).await?;
         Ok(key_id)
     }

-    pub fn get_key_info(&self, key_id: &KeyIdentifier) -> Result<PublicKey, KeyError<SignerError>> {
-        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id)?;
+    pub async fn get_key_info(&self, key_id: &KeyIdentifier) -> Result<PublicKey, KeyError<SignerError>> {
+        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id).await?;
         self.get_public_key_from_id(&kmip_key_pair_ids.public_key_id)
             .map_err(KeyError::Signer)
     }

-    pub fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError<SignerError>> {
-        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id)?;
+    pub async fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError<SignerError>> {
+        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id).await?;
         let mut res = self
             .destroy_key_pair(&kmip_key_pair_ids, KeyStatus::Active)
@@ -874,10 +883,17 @@ impl KmipSigner {
         }
         // remove the key from the signer mapper as well
-        if let Some(signer_handle) = self.handle.read().unwrap().as_ref() {
+        let signer_handle_opt = {
+            // We need to clone this because the RwLock guard cannot be held across an await point.
+            // We could use tokio::sync::RwLock in future, but that's a difficult change right now.
+            self.handle.read().unwrap().clone()
+        };
+
+        if let Some(signer_handle) = signer_handle_opt {
             let res2 = self
                 .mapper
-                .remove_key(signer_handle, key_id)
+                .remove_key(&signer_handle, key_id)
+                .await
                 .map_err(|err| KeyError::Signer(SignerError::Other(err.to_string())));

             if let Err(err) = &res2 {
@@ -893,13 +909,13 @@
         res
     }

-    pub fn sign<Alg: SignatureAlgorithm, D: AsRef<[u8]> + ?Sized>(
+    pub async fn sign<Alg: SignatureAlgorithm, D: AsRef<[u8]> + ?Sized>(
         &self,
         key_id: &KeyIdentifier,
         algorithm: Alg,
         data: &D,
     ) -> Result<Signature<Alg>, SigningError<SignerError>> {
-        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id)?;
+        let kmip_key_pair_ids = self.lookup_kmip_key_ids(key_id).await?;

         let signature = self
             .sign_with_key(&kmip_key_pair_ids.private_key_id, algorithm, data.as_ref())
diff --git a/src/commons/crypto/signing/signers/mocksigner.rs b/src/commons/crypto/signing/signers/mocksigner.rs
index 3e609a08a..6f5e3d8b9 100644
--- a/src/commons/crypto/signing/signers/mocksigner.rs
+++ b/src/commons/crypto/signing/signers/mocksigner.rs
@@ -11,6 +11,7 @@ use openssl::{
 };
 use rpki::{
+    ca::idexchange::MyHandle,
     crypto::signer::KeyError,
     crypto::{
         signer::SigningAlgorithm, KeyIdentifier, PublicKey, PublicKeyFormat, RpkiSignature, RpkiSignatureAlgorithm,
@@ -104,6 +105,19 @@ impl MockSigner {
         }
     }

+    /// Gets the current handle, drops the std::sync::RwLock as
+    /// quickly as possible so that the handle can be used in
+    /// async functions.
+    ///
+    /// Converting to use tokio::sync::RwLock is too difficult
+    /// at this time.
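+    /// (Cloning the handle means no std lock guard is held across an `.await` point;
+    /// such a guard is not `Send`, which would make the calling future non-`Send`.)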
+ fn handle(&self) -> Result { + let lock = self.handle.read().unwrap(); + + lock.clone() + .ok_or_else(|| SignerError::Other("Signer has no handle".to_string())) + } + fn inc_fn_call_count(&self, fn_idx: FnIdx) { self.fn_call_counts.inc(fn_idx) } @@ -149,11 +163,12 @@ impl MockSigner { PublicKey::decode(bytes).map_err(|_| SignerError::DecodeError) } - fn internal_id_from_key_identifier(&self, key_identifier: &KeyIdentifier) -> Result { - let lock = self.handle.read().unwrap(); - let signer_handle = lock.as_ref().unwrap(); + async fn internal_id_from_key_identifier(&self, key_identifier: &KeyIdentifier) -> Result { + let signer_handle = self.handle()?; + self.mapper - .get_key(signer_handle, key_identifier) + .get_key(&signer_handle, key_identifier) + .await .map_err(|_| SignerError::KeyNotFound) } @@ -214,51 +229,53 @@ impl MockSigner { // Implement the functions defined by the `Signer` trait because `SignerProvider` expects to invoke them, but as the // dispatching is not trait based we don't actually have to implement the `Signer` trait. impl MockSigner { - pub fn create_key(&self, _algorithm: PublicKeyFormat) -> Result { + pub async fn create_key(&self, _algorithm: PublicKeyFormat) -> Result { self.inc_fn_call_count(FnIdx::CreateKey); let (_, _, key_identifier, internal_id) = self.build_key().unwrap(); // tell the signer mapper we own this key identifier which maps to our "internal id" - let lock = self.handle.read().unwrap(); - let signer_handle = lock.as_ref().unwrap(); + let signer_handle = self.handle()?; + self.mapper - .add_key(signer_handle, &key_identifier, &internal_id) + .add_key(&signer_handle, &key_identifier, &internal_id) + .await .unwrap(); Ok(key_identifier) } - pub fn get_key_info(&self, key_identifier: &KeyIdentifier) -> Result> { + pub async fn get_key_info(&self, key_identifier: &KeyIdentifier) -> Result> { self.inc_fn_call_count(FnIdx::GetKeyInfo); - let internal_id = self.internal_id_from_key_identifier(key_identifier)?; + let internal_id = self.internal_id_from_key_identifier(key_identifier).await?; let pkey = self.load_key(&internal_id).ok_or(KeyError::KeyNotFound)?; let public_key = Self::public_key_from_pkey(&pkey).unwrap(); Ok(public_key) } - pub fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { + pub async fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { self.inc_fn_call_count(FnIdx::DestroyKey); - let internal_id = self.internal_id_from_key_identifier(key_id).unwrap(); + let internal_id = self.internal_id_from_key_identifier(key_id).await.unwrap(); let _ = self.keys.write().unwrap().remove(&internal_id); // remove the key from the signer mapper as well - if let Some(signer_handle) = self.handle.read().unwrap().as_ref() { + if let Ok(signer_handle) = self.handle() { self.mapper - .remove_key(signer_handle, key_id) + .remove_key(&signer_handle, key_id) + .await .map_err(|err| KeyError::Signer(SignerError::Other(err.to_string())))?; } Ok(()) } - pub fn sign + ?Sized>( + pub async fn sign + ?Sized + Sync>( &self, key_identifier: &KeyIdentifier, algorithm: Alg, data: &D, ) -> Result, SigningError> { self.inc_fn_call_count(FnIdx::Sign); - let internal_id = self.internal_id_from_key_identifier(key_identifier)?; + let internal_id = self.internal_id_from_key_identifier(key_identifier).await?; let pkey = self.load_key(&internal_id).ok_or(SignerError::KeyNotFound)?; Self::sign_with_key(algorithm, &pkey, data).map_err(SigningError::Signer) } diff --git a/src/commons/crypto/signing/signers/pkcs11/signer.rs 
b/src/commons/crypto/signing/signers/pkcs11/signer.rs index 9db16be95..e5ffcb3dd 100644 --- a/src/commons/crypto/signing/signers/pkcs11/signer.rs +++ b/src/commons/crypto/signing/signers/pkcs11/signer.rs @@ -19,6 +19,7 @@ use cryptoki::{ }; use rpki::{ + ca::idexchange::MyHandle, crypto::signer::KeyError, crypto::{ KeyIdentifier, PublicKey, PublicKeyFormat, RpkiSignature, RpkiSignatureAlgorithm, Signature, @@ -340,6 +341,18 @@ impl Pkcs11Signer { &self.name } + /// Gets the handle and drops the lock. Returns an error if the + /// handle is not yet set. We need this because std::sync::RwLock + /// cannot be sent to async fns and changing the lock to tokio::sync::RwLock + /// is too difficult right now. In particular with regards to closures. + fn handle(&self) -> Result { + self.handle + .read() + .unwrap() + .clone() + .ok_or_else(|| SignerError::Other("handle is not set".to_string())) + } + pub fn set_handle(&self, handle: SignerHandle) { let mut writable_handle = self.handle.write().unwrap(); if writable_handle.is_some() { @@ -779,29 +792,28 @@ impl Pkcs11Signer { //------------ High level helper functions for use by the public Signer interface implementation ---------------------- impl Pkcs11Signer { - pub(super) fn remember_key_id( + pub(super) async fn remember_key_id( &self, key_id: &rpki::crypto::KeyIdentifier, internal_key_id: String, ) -> Result<(), SignerError> { - let readable_handle = self.handle.read().unwrap(); - let signer_handle = readable_handle.as_ref().ok_or_else(|| { - SignerError::Other("PKCS#11: Failed to record signer key: Signer handle not set".to_string()) - })?; + let signer_handle = self.handle()?; + self.mapper - .add_key(signer_handle, key_id, &internal_key_id) + .add_key(&signer_handle, key_id, &internal_key_id) + .await .map_err(|err| SignerError::Pkcs11Error(format!("Failed to record signer key: {}", err)))?; Ok(()) } - pub(super) fn lookup_key_id(&self, key_id: &KeyIdentifier) -> Result> { - let readable_handle = self.handle.read().unwrap(); - let signer_handle = readable_handle.as_ref().ok_or(KeyError::KeyNotFound)?; + pub(super) async fn lookup_key_id(&self, key_id: &KeyIdentifier) -> Result> { + let signer_handle = self.handle().map_err(|_| KeyError::KeyNotFound)?; let internal_key_id = self .mapper - .get_key(signer_handle, key_id) + .get_key(&signer_handle, key_id) + .await .map_err(|_| KeyError::KeyNotFound)?; Ok(internal_key_id) @@ -996,22 +1008,22 @@ impl Pkcs11Signer { // dispatching is not trait based we don't actually have to implement the `Signer` trait. 
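 // (These signer methods are plain inherent `async fn`s rather than trait methods, so unlike the
 // event sourcing traits later in this change they do not need the `#[async_trait]` attribute.)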
impl Pkcs11Signer { - pub fn create_key(&self, algorithm: PublicKeyFormat) -> Result { + pub async fn create_key(&self, algorithm: PublicKeyFormat) -> Result { let (key, _, _, internal_key_id) = self.build_key(algorithm)?; let key_id = key.key_identifier(); - self.remember_key_id(&key_id, internal_key_id)?; + self.remember_key_id(&key_id, internal_key_id).await?; Ok(key_id) } - pub fn get_key_info(&self, key_id: &KeyIdentifier) -> Result> { - let internal_key_id = self.lookup_key_id(key_id)?; + pub async fn get_key_info(&self, key_id: &KeyIdentifier) -> Result> { + let internal_key_id = self.lookup_key_id(key_id).await?; let pub_handle = self.find_key(&internal_key_id, ObjectClass::PUBLIC_KEY)?; self.get_public_key_from_handle(pub_handle).map_err(KeyError::Signer) } - pub fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { + pub async fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { debug!("[{}] Destroying key pair with ID {}", self.name, key_id); - let internal_key_id = self.lookup_key_id(key_id)?; + let internal_key_id = self.lookup_key_id(key_id).await?; let mut res: Result<(), KeyError> = Ok(()); // try deleting the public key @@ -1047,10 +1059,11 @@ impl Pkcs11Signer { } // remove the key from the signer mapper as well - if let Some(signer_handle) = self.handle.read().unwrap().as_ref() { + if let Ok(signer_handle) = self.handle() { let res3 = self .mapper - .remove_key(signer_handle, key_id) + .remove_key(&signer_handle, key_id) + .await .map_err(|err| KeyError::Signer(SignerError::Other(err.to_string()))); if let Err(err) = &res3 { @@ -1066,13 +1079,14 @@ impl Pkcs11Signer { res } - pub fn sign + ?Sized>( + pub async fn sign + ?Sized>( &self, key_id: &KeyIdentifier, algorithm: Alg, data: &D, ) -> Result, SigningError> { - let internal_key_id = self.lookup_key_id(key_id)?; + let internal_key_id = self.lookup_key_id(key_id).await?; + let priv_handle = self .find_key(&internal_key_id, ObjectClass::PRIVATE_KEY) .map_err(|err| match err { diff --git a/src/commons/crypto/signing/signers/softsigner.rs b/src/commons/crypto/signing/signers/softsigner.rs index c40b5327c..15594e722 100644 --- a/src/commons/crypto/signing/signers/softsigner.rs +++ b/src/commons/crypto/signing/signers/softsigner.rs @@ -11,10 +11,13 @@ use openssl::{ pkey::{PKey, PKeyRef, Private}, rsa::Rsa, }; -use rpki::crypto::{ - signer::{KeyError, SigningAlgorithm}, - KeyIdentifier, PublicKey, PublicKeyFormat, RpkiSignature, RpkiSignatureAlgorithm, Signature, SignatureAlgorithm, - SigningError, +use rpki::{ + ca::idexchange::MyHandle, + crypto::{ + signer::{KeyError, SigningAlgorithm}, + KeyIdentifier, PublicKey, PublicKeyFormat, RpkiSignature, RpkiSignatureAlgorithm, Signature, + SignatureAlgorithm, SigningError, + }, }; use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer}; use url::Url; @@ -83,6 +86,15 @@ impl OpenSslSigner { &self.name } + /// Gets the handle and drops the lock so it can be used for async fns + fn handle(&self) -> Result { + self.handle + .read() + .unwrap() + .clone() + .ok_or_else(|| SignerError::Other("handle not set".to_string())) + } + pub fn set_handle(&self, handle: SignerHandle) { let mut writable_handle = self.handle.write().unwrap(); if writable_handle.is_some() { @@ -95,22 +107,22 @@ impl OpenSslSigner { self.info.clone() } - pub fn create_registration_key(&self) -> Result<(PublicKey, String), SignerError> { + pub async fn create_registration_key(&self) -> Result<(PublicKey, String), SignerError> { // For the OpenSslSigner we use the 
KeyIdentifier as the internal key id so the two are the same. - let key_id = self.build_key()?; + let key_id = self.build_key().await?; let internal_key_id = key_id.to_string(); - let key_pair = self.load_key(&key_id)?; + let key_pair = self.load_key(&key_id).await?; let public_key = key_pair.subject_public_key_info()?; Ok((public_key, internal_key_id)) } - pub fn sign_registration_challenge + ?Sized>( + pub async fn sign_registration_challenge + ?Sized>( &self, signer_private_key_id: &str, challenge: &D, ) -> Result { let key_id = KeyIdentifier::from_str(signer_private_key_id).map_err(|_| SignerError::KeyNotFound)?; - let key_pair = self.load_key(&key_id)?; + let key_pair = self.load_key(&key_id).await?; let signature = Self::sign_with_key(key_pair.pkey.as_ref(), RpkiSignatureAlgorithm::default(), challenge)?; Ok(signature) } @@ -123,12 +135,12 @@ impl OpenSslSigner { Ok(store) } - fn build_key(&self) -> Result { + async fn build_key(&self) -> Result { let kp = OpenSslKeyPair::build()?; - self.store_key(kp) + self.store_key(kp).await } - fn store_key(&self, kp: OpenSslKeyPair) -> Result { + async fn store_key(&self, kp: OpenSslKeyPair) -> Result { let pk = &kp.subject_public_key_info()?; let key_id = pk.key_identifier(); @@ -137,6 +149,7 @@ impl OpenSslSigner { match self .keys_store .store(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string())), &json) // key_id should always be a valid Segment + .await { Ok(_) => Ok(key_id), Err(err) => Err(SignerError::Other(format!("Failed to store key: {}:", err))), @@ -161,11 +174,12 @@ impl OpenSslSigner { Ok(signature) } - fn load_key(&self, key_id: &KeyIdentifier) -> Result { + async fn load_key(&self, key_id: &KeyIdentifier) -> Result { // TODO decrypt key after read match self .keys_store .get(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment + .await { Ok(Some(kp)) => Ok(kp), Ok(None) => Err(SignerError::KeyNotFound), @@ -173,18 +187,19 @@ impl OpenSslSigner { } } - fn remember_key_id(&self, key_id: &KeyIdentifier) -> Result<(), SignerError> { + async fn remember_key_id(&self, key_id: &KeyIdentifier) -> Result<(), SignerError> { // When testing the OpenSSlSigner in isolation there is no need for a mapper as we don't need to determine // which signer to use for a particular KeyIdentifier as there is only one signer, and the OpenSslSigner // doesn't need a mapper to map from KeyIdentifier to internal key id as the internal key id IS the // KeyIdentifier. if let Some(mapper) = &self.mapper { - let readable_handle = self.handle.read().unwrap(); - let signer_handle = readable_handle.as_ref().ok_or_else(|| { + let signer_handle = self.handle().map_err(|_| { SignerError::Other("OpenSSL: Failed to record signer key: Signer handle not set".to_string()) })?; + mapper - .add_key(signer_handle, key_id, &format!("{}", key_id)) + .add_key(&signer_handle, key_id, &format!("{}", key_id)) + .await .map_err(|err| SignerError::Other(format!("Failed to record signer key: {}", err))) } else { Ok(()) @@ -195,40 +210,41 @@ impl OpenSslSigner { // Implement the functions defined by the `Signer` trait because `SignerProvider` expects to invoke them, but as the // dispatching is not trait based we don't actually have to implement the `Signer` trait. 
impl OpenSslSigner { - pub fn create_key(&self, _algorithm: PublicKeyFormat) -> Result { - let key_id = self.build_key()?; - self.remember_key_id(&key_id)?; + pub async fn create_key(&self, _algorithm: PublicKeyFormat) -> Result { + let key_id = self.build_key().await?; + self.remember_key_id(&key_id).await?; Ok(key_id) } /// Import an existing RSA key pair from the PEM encoded private key - pub fn import_key(&self, pem: &str) -> Result { + pub async fn import_key(&self, pem: &str) -> Result { let kp = OpenSslKeyPair::from_pem(pem)?; - let key_id = self.store_key(kp)?; - self.remember_key_id(&key_id)?; + let key_id = self.store_key(kp).await?; + self.remember_key_id(&key_id).await?; Ok(key_id) } - pub fn get_key_info(&self, key_id: &KeyIdentifier) -> Result> { - let key_pair = self.load_key(key_id)?; + pub async fn get_key_info(&self, key_id: &KeyIdentifier) -> Result> { + let key_pair = self.load_key(key_id).await?; Ok(key_pair.subject_public_key_info()?) } - pub fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { + pub async fn destroy_key(&self, key_id: &KeyIdentifier) -> Result<(), KeyError> { self.keys_store .drop_key(&Key::new_global(SegmentBuf::parse_lossy(&key_id.to_string()))) // key_id should always be a valid Segment + .await .map_err(|_| KeyError::Signer(SignerError::KeyNotFound)) } - pub fn sign + ?Sized>( + pub async fn sign + ?Sized + Sync>( &self, key_id: &KeyIdentifier, algorithm: Alg, data: &D, ) -> Result, SigningError> { - let key_pair = self.load_key(key_id)?; + let key_pair = self.load_key(key_id).await?; Self::sign_with_key(key_pair.pkey.as_ref(), algorithm, data).map_err(SigningError::Signer) } @@ -316,14 +332,13 @@ pub mod tests { use super::*; - #[test] - fn should_return_subject_public_key_info() { - test::test_in_memory(|storage_uri| { - let s = OpenSslSigner::build(storage_uri, "dummy", None).unwrap(); - let ki = s.create_key(PublicKeyFormat::Rsa).unwrap(); - s.get_key_info(&ki).unwrap(); - s.destroy_key(&ki).unwrap(); - }) + #[tokio::test] + async fn should_return_subject_public_key_info() { + let storage_uri = test::mem_storage(); + let s = OpenSslSigner::build(&storage_uri, "dummy", None).unwrap(); + let ki = s.create_key(PublicKeyFormat::Rsa).await.unwrap(); + s.get_key_info(&ki).await.unwrap(); + s.destroy_key(&ki).await.unwrap(); } #[test] @@ -338,29 +353,27 @@ pub mod tests { assert_eq!(json, json_from_des); } - #[test] - fn import_existing_pkcs1_openssl_key() { - test::test_in_memory(|storage_uri| { - // The following key was generated using OpenSSL on the command line - let pem = include_str!("../../../../../test-resources/ta/example-pkcs1.pem"); - let signer = OpenSslSigner::build(storage_uri, "dummy", None).unwrap(); - - let ki = signer.import_key(pem).unwrap(); - signer.get_key_info(&ki).unwrap(); - signer.destroy_key(&ki).unwrap(); - }) + #[tokio::test] + async fn import_existing_pkcs1_openssl_key() { + let storage_uri = test::mem_storage(); + // The following key was generated using OpenSSL on the command line + let pem = include_str!("../../../../../test-resources/ta/example-pkcs1.pem"); + let signer = OpenSslSigner::build(&storage_uri, "dummy", None).unwrap(); + + let ki = signer.import_key(pem).await.unwrap(); + signer.get_key_info(&ki).await.unwrap(); + signer.destroy_key(&ki).await.unwrap(); } - #[test] - fn import_existing_pkcs8_openssl_key() { - test::test_in_memory(|storage_uri| { - // The following key was generated using OpenSSL on the command line - let pem = 
include_str!("../../../../../test-resources/ta/example-pkcs8.pem"); - let signer = OpenSslSigner::build(storage_uri, "dummy", None).unwrap(); - - let ki = signer.import_key(pem).unwrap(); - signer.get_key_info(&ki).unwrap(); - signer.destroy_key(&ki).unwrap(); - }) + #[tokio::test] + async fn import_existing_pkcs8_openssl_key() { + let storage_uri = test::mem_storage(); + // The following key was generated using OpenSSL on the command line + let pem = include_str!("../../../../../test-resources/ta/example-pkcs8.pem"); + let signer = OpenSslSigner::build(&storage_uri, "dummy", None).unwrap(); + + let ki = signer.import_key(pem).await.unwrap(); + signer.get_key_info(&ki).await.unwrap(); + signer.destroy_key(&ki).await.unwrap(); } } diff --git a/src/commons/eventsourcing/agg.rs b/src/commons/eventsourcing/agg.rs index cbe79f819..1df332f8f 100644 --- a/src/commons/eventsourcing/agg.rs +++ b/src/commons/eventsourcing/agg.rs @@ -22,6 +22,7 @@ use crate::commons::{ /// this intent and decide whether it can be executed. If successful a number of /// 'events' are returned that contain state changes to the aggregate. These events /// still need to be applied to become persisted. +#[async_trait::async_trait] pub trait Aggregate: Storable + Send + Sync + 'static { type InitCommand: InitCommand; type InitEvent: InitEvent; @@ -50,7 +51,7 @@ pub trait Aggregate: Storable + Send + Sync + 'static { /// Tries to initialise a new InitEvent for a new instance. This /// can fail. The InitEvent is not applied here, but returned so /// that we can re-build state from history. - fn process_init_command(command: Self::InitCommand) -> Result; + async fn process_init_command(command: Self::InitCommand) -> Result; /// Returns the current version of the aggregate. fn version(&self) -> u64; @@ -87,5 +88,5 @@ pub trait Aggregate: Storable + Send + Sync + 'static { /// /// The events are not applied here, but need to be applied using /// [`apply_command`] so that we can re-build state from history. - fn process_command(&self, command: Self::Command) -> Result, Self::Error>; + async fn process_command(&self, command: Self::Command) -> Result, Self::Error>; } diff --git a/src/commons/eventsourcing/listener.rs b/src/commons/eventsourcing/listener.rs index 0b60a629c..20a168d3c 100644 --- a/src/commons/eventsourcing/listener.rs +++ b/src/commons/eventsourcing/listener.rs @@ -8,8 +8,9 @@ use super::Aggregate; /// the events *before* the Aggregate is saved. Thus, they are allowed /// to return an error in case of issues, which will then roll back the /// intended change to an aggregate. +#[async_trait::async_trait] pub trait PreSaveEventListener: Send + Sync + 'static { - fn listen(&self, agg: &A, events: &[A::Event]) -> Result<(), A::Error>; + async fn listen(&self, agg: &A, events: &[A::Event]) -> Result<(), A::Error>; } //------------ PostSaveEventListener ------------------------------------------ @@ -17,8 +18,9 @@ pub trait PreSaveEventListener: Send + Sync + 'static { /// This trait defines a listener for events which is designed to receive /// them *after* the updated Aggregate is saved. Because the updates already /// happened EventListeners of this type are not allowed to fail. 
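/// (With `#[async_trait::async_trait]` the `async fn listen` below desugars into a method returning a
/// boxed `Future`, which keeps the trait usable behind `dyn` references.)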
+#[async_trait::async_trait] pub trait PostSaveEventListener: Send + Sync + 'static { - fn listen(&self, agg: &A, events: &[A::Event]); + async fn listen(&self, agg: &A, events: &[A::Event]); } //------------ EventCounter -------------------------------------------------- @@ -46,8 +48,9 @@ impl EventCounter { } } +#[async_trait::async_trait] impl PostSaveEventListener for EventCounter { - fn listen(&self, _agg: &A, events: &[A::Event]) { + async fn listen(&self, _agg: &A, events: &[A::Event]) { self.counter.write().unwrap().total += events.len(); } } diff --git a/src/commons/eventsourcing/mod.rs b/src/commons/eventsourcing/mod.rs index 922af95ac..8991fbd48 100644 --- a/src/commons/eventsourcing/mod.rs +++ b/src/commons/eventsourcing/mod.rs @@ -273,6 +273,7 @@ mod tests { } } + #[async_trait::async_trait] impl Aggregate for Person { type InitCommand = PersonInitCommand; type InitEvent = PersonInitEvent; @@ -293,7 +294,7 @@ mod tests { } } - fn process_init_command(command: Self::InitCommand) -> Result { + async fn process_init_command(command: Self::InitCommand) -> Result { Ok(PersonInitEvent { name: command.into_details().name, }) @@ -314,7 +315,7 @@ mod tests { } } - fn process_command(&self, command: Self::Command) -> Result, Self::Error> { + async fn process_command(&self, command: Self::Command) -> Result, Self::Error> { match command.into_details() { PersonCommandDetails::ChangeName(name) => { let event = PersonEvent::name_changed(name); @@ -332,8 +333,8 @@ mod tests { } } - #[test] - fn event_sourcing_framework() { + #[tokio::test] + async fn event_sourcing_framework() { let storage_uri = mem_storage(); let counter = Arc::new(EventCounter::default()); @@ -347,16 +348,16 @@ mod tests { let alice_handle = MyHandle::from_str("alice").unwrap(); let alice_init_cmd = PersonInitCommand::make(&alice_handle, alice_name); - manager.add(alice_init_cmd).unwrap(); + manager.add(alice_init_cmd).await.unwrap(); - let mut alice = manager.get_latest(&alice_handle).unwrap(); + let mut alice = manager.get_latest(&alice_handle).await.unwrap(); assert_eq!("alice smith", alice.name()); assert_eq!(0, alice.age()); let mut age = 0; loop { let get_older = PersonCommand::go_around_sun(&alice_handle, None); - alice = manager.command(get_older).unwrap(); + alice = manager.command(get_older).await.unwrap(); age += 1; if age == 21 { @@ -368,7 +369,7 @@ mod tests { assert_eq!(21, alice.age()); let change_name = PersonCommand::change_name(&alice_handle, Some(22), "alice smith-doe"); - let alice = manager.command(change_name).unwrap(); + let alice = manager.command(change_name).await.unwrap(); assert_eq!("alice smith-doe", alice.name()); assert_eq!(21, alice.age()); @@ -376,7 +377,7 @@ mod tests { let manager = AggregateStore::::create(&storage_uri, &NamespaceBuf::parse_lossy("person"), false).unwrap(); - let alice = manager.get_latest(&alice_handle).unwrap(); + let alice = manager.get_latest(&alice_handle).await.unwrap(); assert_eq!("alice smith-doe", alice.name()); assert_eq!(21, alice.age()); @@ -387,7 +388,7 @@ mod tests { crit.set_offset(3); crit.set_rows(10); - let history = manager.command_history(&alice_handle, crit).unwrap(); + let history = manager.command_history(&alice_handle, crit).await.unwrap(); assert_eq!(history.total(), 22); assert_eq!(history.offset(), 3); assert_eq!(history.commands().len(), 10); @@ -396,8 +397,7 @@ mod tests { // Get history excluding 'around the sun' commands let mut crit = CommandHistoryCriteria::default(); crit.set_excludes(&["person-around-sun"]); - let history = 
manager.command_history(&alice_handle, crit).unwrap(); + let history = manager.command_history(&alice_handle, crit).await.unwrap(); assert_eq!(history.total(), 1); - // }) } } diff --git a/src/commons/eventsourcing/store.rs b/src/commons/eventsourcing/store.rs index 95c3fb489..8a4e8eabd 100644 --- a/src/commons/eventsourcing/store.rs +++ b/src/commons/eventsourcing/store.rs @@ -1,9 +1,6 @@ -use std::{ - collections::HashMap, - fmt, - str::FromStr, - sync::{Arc, Mutex, RwLock}, -}; +use std::{collections::HashMap, fmt, str::FromStr, sync::Arc}; + +use tokio::sync::{Mutex, RwLock}; use rpki::{ca::idexchange::MyHandle, repository::x509::Time}; use url::Url; @@ -73,19 +70,20 @@ impl AggregateStore { } /// Warms up the cache, to be used after startup. Will fail if any aggregates fail to load. - pub fn warm(&self) -> StoreResult<()> { - for handle in self.list()? { - self.warm_aggregate(&handle)?; + pub async fn warm(&self) -> StoreResult<()> { + for handle in self.list().await? { + self.warm_aggregate(&handle).await?; } info!("Cache for CAs has been warmed."); Ok(()) } /// Warm the cache for a specific aggregate. - pub fn warm_aggregate(&self, handle: &MyHandle) -> StoreResult<()> { + pub async fn warm_aggregate(&self, handle: &MyHandle) -> StoreResult<()> { info!("Warming the cache for: '{}'", handle); self.get_latest(handle) + .await .map_err(|e| AggregateStoreError::WarmupFailed(handle.clone(), e.to_string()))?; Ok(()) @@ -111,30 +109,30 @@ where /// Gets the latest version for the given aggregate. Returns /// an AggregateStoreError::UnknownAggregate in case the aggregate /// does not exist. - pub fn get_latest(&self, handle: &MyHandle) -> Result, A::Error> { - self.execute_opt_command(handle, None, false) + pub async fn get_latest(&self, handle: &MyHandle) -> Result, A::Error> { + self.execute_opt_command(handle, None, false).await } /// Updates the snapshots for all entities in this store. - pub fn update_snapshots(&self) -> Result<(), A::Error> { - for handle in self.list()? { - self.save_snapshot(&handle)?; + pub async fn update_snapshots(&self) -> Result<(), A::Error> { + for handle in self.list().await? { + self.save_snapshot(&handle).await?; } Ok(()) } /// Gets the latest version for the given aggregate and updates the snapshot. - pub fn save_snapshot(&self, handle: &MyHandle) -> Result, A::Error> { - self.execute_opt_command(handle, None, true) + pub async fn save_snapshot(&self, handle: &MyHandle) -> Result, A::Error> { + self.execute_opt_command(handle, None, true).await } /// Adds a new aggregate instance based on the init event. - pub fn add(&self, cmd: A::InitCommand) -> Result, A::Error> { + pub async fn add(&self, cmd: A::InitCommand) -> Result, A::Error> { let scope = Self::scope_for_agg(cmd.handle()); self.kv - .execute(&scope, move |kv| { + .execute(&scope, |kv| async move { let handle = cmd.handle().clone(); let init_command_key = Self::key_for_command(&handle, 0); @@ -151,7 +149,7 @@ where cmd.store(), ); - match A::process_init_command(cmd.clone()) { + match A::process_init_command(cmd.clone()).await { Ok(init_event) => { let aggregate = A::init(handle.clone(), init_event.clone()); let processed_command = processed_command_builder.finish_with_init_event(init_event); @@ -161,7 +159,7 @@ where let arc = Arc::new(aggregate); - self.cache_update(&handle, arc.clone()); + self.cache_update(&handle, arc.clone()).await; Ok(Ok(arc)) } @@ -169,6 +167,7 @@ where } } }) + .await .map_err(|e| A::Error::from(AggregateStoreError::KeyStoreError(e)))? 
} @@ -186,37 +185,46 @@ where /// - do not save anything, return aggregate /// on error: /// - save command and error, return error - pub fn command(&self, cmd: A::Command) -> Result, A::Error> { - self.execute_opt_command(cmd.handle(), Some(&cmd), false) + pub async fn command(&self, cmd: A::Command) -> Result, A::Error> { + self.execute_opt_command(cmd.handle(), Some(&cmd), false).await } /// Returns true if an instance exists for the id - pub fn has(&self, id: &MyHandle) -> Result { + pub async fn has(&self, id: &MyHandle) -> Result { let init_command_key = Self::key_for_command(id, 0); self.kv .has(&init_command_key) + .await .map_err(AggregateStoreError::KeyStoreError) } /// Lists all known ids. - pub fn list(&self) -> Result, AggregateStoreError> { - self.aggregates() + pub async fn list(&self) -> Result, AggregateStoreError> { + let mut res = vec![]; + + for scope in self.kv.scopes().await? { + if let Ok(handle) = MyHandle::from_str(&scope.to_string()) { + res.push(handle) + } + } + + Ok(res) } /// Get the latest aggregate and optionally apply a command to it, all /// inside a single transaction (postgres) or lock (disk). - fn execute_opt_command( + async fn execute_opt_command( &self, handle: &MyHandle, cmd_opt: Option<&A::Command>, save_snapshot: bool, ) -> Result, A::Error> { self.kv - .execute(&Self::scope_for_agg(handle), |kv| { + .execute(&Self::scope_for_agg(handle), |kv| async move { // Get the aggregate from the cache, or get it from the store. let mut changed_from_cached = false; - let latest_result = match self.cache_get(handle) { + let latest_result = match self.cache_get(handle).await { Some(arc) => Ok(arc), None => { // There was no cached aggregate, so try to get it @@ -316,7 +324,7 @@ where std::process::exit(1); } - match aggregate.process_command(cmd.clone()) { + match aggregate.process_command(cmd.clone()).await { Err(e) => { // Store the processed command with the error. let processed_command = processed_command_builder.finish_with_error(&e); @@ -348,7 +356,7 @@ where let mut opt_err: Option = None; if let Some(events) = processed_command.events() { for pre_save_listener in &self.pre_save_listeners { - if let Err(e) = pre_save_listener.as_ref().listen(aggregate, events) { + if let Err(e) = pre_save_listener.as_ref().listen(aggregate, events).await { opt_err = Some(e); break; } @@ -368,7 +376,7 @@ where // Now send the events to the 'post-save' listeners. if let Some(events) = processed_command.events() { for listener in &self.post_save_listeners { - listener.as_ref().listen(aggregate, events); + listener.as_ref().listen(aggregate, events).await; } } @@ -384,7 +392,7 @@ where }; if changed_from_cached { - self.cache_update(handle, agg.clone()); + self.cache_update(handle, agg.clone()).await; } if save_snapshot { @@ -399,6 +407,7 @@ where Ok(Ok(agg)) } }) + .await .map_err(|e| A::Error::from(AggregateStoreError::KeyStoreError(e)))? 
} } @@ -410,7 +419,7 @@ where A::Error: From, { /// Find all commands that fit the criteria and return history - pub fn command_history( + pub async fn command_history( &self, id: &MyHandle, crit: CommandHistoryCriteria, @@ -451,21 +460,21 @@ where match &self.history_cache { Some(mutex) => { - let mut cache_lock = mutex.lock().unwrap(); + let mut cache_lock = mutex.lock().await; let records = cache_lock.entry(id.clone()).or_default(); - self.update_history_records(records, id)?; + self.update_history_records(records, id).await?; Ok(command_history_for_records(crit, records)) } None => { let mut records = vec![]; - self.update_history_records(&mut records, id)?; + self.update_history_records(&mut records, id).await?; Ok(command_history_for_records(crit, &records)) } } } /// Updates history records for a given aggregate - fn update_history_records( + async fn update_history_records( &self, records: &mut Vec, id: &MyHandle, @@ -475,7 +484,7 @@ where None => 1, }; - while let Ok(command) = self.get_command(id, version) { + while let Ok(command) = self.get_command(id, version).await { records.push(CommandHistoryRecord::from(command)); version += 1; } @@ -484,10 +493,10 @@ where } /// Get the command for this key, if it exists - pub fn get_command(&self, id: &MyHandle, version: u64) -> Result, AggregateStoreError> { + pub async fn get_command(&self, id: &MyHandle, version: u64) -> Result, AggregateStoreError> { let key = Self::key_for_command(id, version); - match self.kv.get(&key)? { + match self.kv.get(&key).await? { Some(cmd) => Ok(cmd), None => Err(AggregateStoreError::CommandNotFound(id.clone(), version)), } @@ -498,16 +507,16 @@ impl AggregateStore where A::Error: From, { - fn cache_get(&self, id: &MyHandle) -> Option> { - self.cache.read().unwrap().get(id).cloned() + async fn cache_get(&self, id: &MyHandle) -> Option> { + self.cache.read().await.get(id).cloned() } - fn cache_remove(&self, id: &MyHandle) { - self.cache.write().unwrap().remove(id); + async fn cache_remove(&self, id: &MyHandle) { + self.cache.write().await.remove(id); } - fn cache_update(&self, id: &MyHandle, arc: Arc) { - self.cache.write().unwrap().insert(id.clone(), arc); + async fn cache_update(&self, id: &MyHandle, arc: Arc) { + self.cache.write().await.insert(id.clone(), arc); } } @@ -532,26 +541,15 @@ where ) } - /// Private, should be called through `list` which takes care of locking. - fn aggregates(&self) -> Result, AggregateStoreError> { - let mut res = vec![]; - - for scope in self.kv.scopes()? { - if let Ok(handle) = MyHandle::from_str(&scope.to_string()) { - res.push(handle) - } - } - - Ok(res) - } - /// Drop an aggregate, completely. Handle with care! - pub fn drop_aggregate(&self, id: &MyHandle) -> Result<(), AggregateStoreError> { + pub async fn drop_aggregate(&self, id: &MyHandle) -> Result<(), AggregateStoreError> { let scope = Self::scope_for_agg(id); - self.kv.execute(&scope, |kv| kv.delete_scope(&scope))?; + self.kv + .execute(&Scope::global(), |kv| async move { kv.delete_scope(&scope) }) + .await?; - self.cache_remove(id); + self.cache_remove(id).await; Ok(()) } } diff --git a/src/commons/eventsourcing/wal.rs b/src/commons/eventsourcing/wal.rs index 38a509b43..69f49128a 100644 --- a/src/commons/eventsourcing/wal.rs +++ b/src/commons/eventsourcing/wal.rs @@ -134,10 +134,11 @@ impl WalStore { } /// Warms up the store: caches all instances. - pub fn warm(&self) -> WalStoreResult<()> { - for handle in self.list()? { + pub async fn warm(&self) -> WalStoreResult<()> { + for handle in self.list().await? 
{ let latest = self .get_latest(&handle) + .await .map_err(|e| WalStoreError::WarmupFailed(handle.clone(), e.to_string()))?; self.cache.write().unwrap().insert(handle, latest); @@ -146,12 +147,12 @@ impl WalStore { } /// Add a new entity for the given handle. Fails if the handle is in use. - pub fn add(&self, handle: &MyHandle, instance: T) -> WalStoreResult<()> { + pub async fn add(&self, handle: &MyHandle, instance: T) -> WalStoreResult<()> { let scope = Self::scope_for_handle(handle); let instance = Arc::new(instance); self.kv - .execute(&scope, |kv| { + .execute(&scope, |kv| async move { let key = Self::key_for_snapshot(handle); let json = serde_json::to_value(instance.as_ref())?; kv.store(&key, json)?; @@ -160,13 +161,14 @@ impl WalStore { Ok(()) }) + .await .map_err(WalStoreError::KeyStoreError) } /// Checks whether there is an instance for the given handle. - pub fn has(&self, handle: &MyHandle) -> WalStoreResult { + pub async fn has(&self, handle: &MyHandle) -> WalStoreResult { let scope = Self::scope_for_handle(handle); - self.kv.has_scope(&scope).map_err(WalStoreError::KeyStoreError) + self.kv.has_scope(&scope).await.map_err(WalStoreError::KeyStoreError) } /// Get the latest revision for the given handle. @@ -174,32 +176,33 @@ impl WalStore { /// This will use the cache if it's available and otherwise get a snapshot /// from the keystore. Then it will check whether there are any further /// changes. - pub fn get_latest(&self, handle: &MyHandle) -> Result, T::Error> { - self.execute_opt_command(handle, None, false) + pub async fn get_latest(&self, handle: &MyHandle) -> Result, T::Error> { + self.execute_opt_command(handle, None, false).await } /// Remove an instance from this store. Irrevocable. - pub fn remove(&self, handle: &MyHandle) -> WalStoreResult<()> { - if !self.has(handle)? { + pub async fn remove(&self, handle: &MyHandle) -> WalStoreResult<()> { + if !self.has(handle).await? { Err(WalStoreError::Unknown(handle.clone())) } else { let scope = Self::scope_for_handle(handle); self.kv - .execute(&scope, |kv| { + .execute(&Scope::global(), |kv| async move { kv.delete_scope(&scope)?; self.cache_remove(handle); Ok(()) }) + .await .map_err(WalStoreError::KeyStoreError) } } /// Returns a list of all instances managed in this store. - pub fn list(&self) -> WalStoreResult> { + pub async fn list(&self) -> WalStoreResult> { let mut res = vec![]; - for scope in self.kv.scopes()? { + for scope in self.kv.scopes().await? { if let Ok(handle) = MyHandle::from_str(&scope.to_string()) { res.push(handle) } @@ -215,19 +218,19 @@ impl WalStore { /// - apply the wal set locally /// - save the wal set /// - if saved properly update the cache - pub fn send_command(&self, command: T::Command) -> Result, T::Error> { + pub async fn send_command(&self, command: T::Command) -> Result, T::Error> { let handle = command.handle().clone(); - self.execute_opt_command(&handle, Some(command), false) + self.execute_opt_command(&handle, Some(command), false).await } - fn execute_opt_command( + async fn execute_opt_command( &self, handle: &MyHandle, cmd_opt: Option, save_snapshot: bool, ) -> Result, T::Error> { self.kv - .execute(&Self::scope_for_handle(handle), |kv| { + .execute(&Self::scope_for_handle(handle), |kv| async move { // Track whether anything has changed compared to the cached // instance (if any) so that we will know whether the cache // should be updated. @@ -368,19 +371,20 @@ impl WalStore { Ok(Ok(latest)) }) + .await .map_err(|e| T::Error::from(WalStoreError::KeyStoreError(e)))? 
} - pub fn update_snapshots(&self) -> Result<(), T::Error> { - for handle in self.list()? { - self.update_snapshot(&handle)?; + pub async fn update_snapshots(&self) -> Result<(), T::Error> { + for handle in self.list().await? { + self.update_snapshot(&handle).await?; } Ok(()) } /// Update snapshot and archive or delete old wal sets - pub fn update_snapshot(&self, handle: &MyHandle) -> Result, T::Error> { - self.execute_opt_command(handle, None, true) + pub async fn update_snapshot(&self, handle: &MyHandle) -> Result, T::Error> { + self.execute_opt_command(handle, None, true).await } fn cache_get(&self, id: &MyHandle) -> Option> { diff --git a/src/commons/storage/disk.rs b/src/commons/storage/disk.rs index 0cbc401d7..bcd9c5412 100644 --- a/src/commons/storage/disk.rs +++ b/src/commons/storage/disk.rs @@ -8,6 +8,7 @@ use std::{ time::Duration, }; +use futures_util::Future; use serde_json::Value; use crate::commons::{ @@ -293,16 +294,18 @@ impl Disk { } impl Disk { - pub fn execute(&self, scope: &Scope, op: F) -> Result + pub async fn execute<'f, F, T, Ret>(&self, scope: &Scope, op: F) -> Result where - F: FnOnce(&KeyValueStoreDispatcher) -> Result, + F: FnOnce(KeyValueStoreDispatcher) -> Ret, + Ret: Future>, { let lock_file_dir = self.root.join(LOCK_FILE_DIR); let _lock = FileLock::lock(scope.as_path(lock_file_dir))?; - let dispatcher = KeyValueStoreDispatcher::Disk(self); - op(&dispatcher) + let dispatcher = KeyValueStoreDispatcher::Disk(self.clone()); + + op(dispatcher).await } } diff --git a/src/commons/storage/kv.rs b/src/commons/storage/kv.rs index 536275058..30a57df0a 100644 --- a/src/commons/storage/kv.rs +++ b/src/commons/storage/kv.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt}; +use futures_util::Future; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use url::Url; @@ -34,13 +35,13 @@ impl KeyValueStore { } /// Returns true if this KeyValueStore (with this namespace) has any entries. - pub fn is_empty(&self) -> Result { - self.execute(&Scope::global(), |kv| kv.is_empty()) + pub async fn is_empty(&self) -> Result { + self.execute(&Scope::global(), |kv| async move { kv.is_empty() }).await } /// Wipe the complete store. Needless to say perhaps.. use with care.. - pub fn wipe(&self) -> Result<(), KeyValueError> { - self.execute(&Scope::global(), |kv| kv.clear()) + pub async fn wipe(&self) -> Result<(), KeyValueError> { + self.execute(&Scope::global(), |kv| async move { kv.clear() }).await } /// Execute one or more `KeyValueStoreDispatcher` operations @@ -54,39 +55,45 @@ impl KeyValueStore { /// T can be () if no return value is needed. If anything can /// fail in the closure, other than kv calls, then T can be /// a Result. 
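    /// A minimal usage sketch (hypothetical caller; it mirrors how `is_empty()` above is
    /// implemented):
    ///
    /// ```ignore
    /// let empty = kv_store
    ///     .execute(&Scope::global(), |kv| async move { kv.is_empty() })
    ///     .await?;
    /// ```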
diff --git a/src/commons/storage/kv.rs b/src/commons/storage/kv.rs
index 536275058..30a57df0a 100644
--- a/src/commons/storage/kv.rs
+++ b/src/commons/storage/kv.rs
@@ -1,5 +1,6 @@
use std::{collections::HashMap, fmt};

+use futures_util::Future;
use serde::{de::DeserializeOwned, Serialize};
use serde_json::Value;
use url::Url;
@@ -34,13 +35,13 @@ impl KeyValueStore {
    }

    /// Returns true if this KeyValueStore (with this namespace) has any entries.
-    pub fn is_empty(&self) -> Result<bool, KeyValueError> {
-        self.execute(&Scope::global(), |kv| kv.is_empty())
+    pub async fn is_empty(&self) -> Result<bool, KeyValueError> {
+        self.execute(&Scope::global(), |kv| async move { kv.is_empty() }).await
    }

    /// Wipe the complete store. Needless to say perhaps.. use with care..
-    pub fn wipe(&self) -> Result<(), KeyValueError> {
-        self.execute(&Scope::global(), |kv| kv.clear())
+    pub async fn wipe(&self) -> Result<(), KeyValueError> {
+        self.execute(&Scope::global(), |kv| async move { kv.clear() }).await
    }

    /// Execute one or more `KeyValueStoreDispatcher` operations
@@ -54,39 +55,45 @@
    /// T can be () if no return value is needed. If anything can
    /// fail in the closure, other than kv calls, then T can be
    /// a Result.
-    pub fn execute<F, T>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
+    pub async fn execute<'f, F, T, Ret>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
    where
-        F: FnOnce(&KeyValueStoreDispatcher) -> Result<T, KeyValueError>,
+        F: FnOnce(KeyValueStoreDispatcher) -> Ret,
+        Ret: Future<Output = Result<T, KeyValueError>>,
    {
        let dispatcher = match self {
-            KeyValueStore::Memory(memory) => KeyValueStoreDispatcher::Memory(memory),
-            KeyValueStore::Disk(disk) => KeyValueStoreDispatcher::Disk(disk),
+            KeyValueStore::Memory(memory) => KeyValueStoreDispatcher::Memory(memory.clone()),
+            KeyValueStore::Disk(disk) => KeyValueStoreDispatcher::Disk(disk.clone()),
        };

-        dispatcher.execute(scope, op)
+        dispatcher.execute(scope, op).await
    }
}

// # Keys and Values
impl KeyValueStore {
    /// Stores a key value pair, serialized as json, overwrite existing
-    pub fn store<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
-        self.execute(key.scope(), |kv: &KeyValueStoreDispatcher| {
-            kv.store(key, serde_json::to_value(value)?)
-        })
+    pub async fn store<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
+        self.execute(
+            key.scope(),
+            |kv| async move { kv.store(key, serde_json::to_value(value)?) },
+        )
+        .await
    }

    /// Stores a key value pair, serialized as json, fails if existing
-    pub fn store_new<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
-        self.execute(key.scope(), |kv: &KeyValueStoreDispatcher| match kv.get(key)? {
-            None => kv.store(key, serde_json::to_value(value)?),
-            _ => Err(KeyValueError::UnknownKey(key.to_owned())),
+    pub async fn store_new<V: Serialize>(&self, key: &Key, value: &V) -> Result<(), KeyValueError> {
+        self.execute(key.scope(), |kv| async move {
+            match kv.get(key)? {
+                None => kv.store(key, serde_json::to_value(value)?),
+                _ => Err(KeyValueError::UnknownKey(key.to_owned())),
+            }
        })
+        .await
    }

    /// Gets a value for a key, returns an error if the value cannot be deserialized,
    /// returns None if it cannot be found.
-    pub fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<Option<V>, KeyValueError> {
-        self.execute(key.scope(), |kv| {
+    pub async fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<Option<V>, KeyValueError> {
+        self.execute(key.scope(), |kv| async move {
            if let Some(value) = kv.get(key)? {
                trace!("got value for key: {}", key);
                Ok(Some(serde_json::from_value(value)?))
@@ -95,21 +102,22 @@ impl KeyValueStore {
                Ok(None)
            }
        })
+        .await
    }

    /// Returns whether a key exists
-    pub fn has(&self, key: &Key) -> Result<bool, KeyValueError> {
-        self.execute(key.scope(), |kv| kv.has(key))
+    pub async fn has(&self, key: &Key) -> Result<bool, KeyValueError> {
+        self.execute(key.scope(), |kv| async move { kv.has(key) }).await
    }

    /// Returns all keys for the given scope
-    pub fn list_keys(&self, scope: &Scope) -> StorageResult<Vec<Key>> {
-        self.execute(scope, |kv| kv.list_keys(scope))
+    pub async fn list_keys(&self, scope: &Scope) -> StorageResult<Vec<Key>> {
+        self.execute(scope, |kv| async move { kv.list_keys(scope) }).await
    }

    /// Delete a key-value pair
-    pub fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> {
-        self.execute(key.scope(), |kv| kv.delete(key))
+    pub async fn drop_key(&self, key: &Key) -> Result<(), KeyValueError> {
+        self.execute(key.scope(), |kv| async move { kv.delete(key) }).await
    }
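Because every convenience method above now funnels through `execute` with a single-operation `async move` block, call sites keep their shape and simply gain one `.await` per storage call. A sketch of the resulting round-trip (the function and key names here are illustrative):

```rust
// Sketch: store a value and read it back through the async API above.
async fn store_and_read_back(store: &KeyValueStore) -> Result<Option<String>, KeyValueError> {
    let key = Key::new_global(SegmentBuf::parse_lossy("greeting"));
    store.store(&key, &"hello".to_string()).await?; // serialized to JSON
    store.get::<String>(&key).await // deserialized back to String
}
```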
    /// Returns all keys under a scope (scopes are exact strings; a
    /// 'sub'-scope and its parent are two distinct scopes).
    ///
    /// If matching is not empty then the key must contain the given `&str`.
-    pub fn keys(&self, scope: &Scope, matching: &str) -> Result<Vec<Key>, KeyValueError> {
-        self.execute(scope, |kv| {
+    pub async fn keys(&self, scope: &Scope, matching: &str) -> Result<Vec<Key>, KeyValueError> {
+        self.execute(scope, |kv| async move {
            kv.list_keys(scope).map(|keys| {
                keys.into_iter()
                    .filter(|key| {
@@ -127,11 +135,12 @@ impl KeyValueStore {
                    .collect()
            })
        })
+        .await
    }

    /// Returns all key value pairs under a scope.
-    pub fn key_value_pairs(&self, scope: &Scope, matching: &str) -> Result<HashMap<Key, Value>, KeyValueError> {
-        self.execute(scope, |kv| {
+    pub async fn key_value_pairs(&self, scope: &Scope, matching: &str) -> Result<HashMap<Key, Value>, KeyValueError> {
+        self.execute(scope, |kv| async move {
            let keys: Vec<Key> = kv.list_keys(scope).map(|keys| {
                keys.into_iter()
                    .filter(|key| {
@@ -149,24 +158,27 @@ impl KeyValueStore {

            Ok(pairs)
        })
+        .await
    }
}

// # Scopes
impl KeyValueStore {
    /// Returns whether a scope exists
-    pub fn has_scope(&self, scope: &Scope) -> Result<bool, KeyValueError> {
-        self.execute(&Scope::global(), |kv| kv.has_scope(scope))
+    pub async fn has_scope(&self, scope: &Scope) -> Result<bool, KeyValueError> {
+        self.execute(&Scope::global(), |kv| async move { kv.has_scope(scope) })
+            .await
    }

    /// Delete a scope
-    pub fn drop_scope(&self, scope: &Scope) -> Result<(), KeyValueError> {
-        self.execute(scope, |kv| kv.delete_scope(scope))
+    pub async fn drop_scope(&self, scope: &Scope) -> Result<(), KeyValueError> {
+        self.execute(scope, |kv| async move { kv.delete_scope(scope) }).await
    }

    /// Returns all scopes, including sub_scopes
-    pub fn scopes(&self) -> Result<Vec<Scope>, KeyValueError> {
-        self.execute(&Scope::global(), |kv| kv.list_scopes())
+    pub async fn scopes(&self) -> Result<Vec<Scope>, KeyValueError> {
+        self.execute(&Scope::global(), |kv| async move { kv.list_scopes() })
+            .await
    }
}

@@ -189,12 +201,12 @@ impl KeyValueStore {

    /// Archive this store (i.e. for this namespace). Deletes
    /// any existing archive for this namespace if present.
-    pub fn migrate_to_archive(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> {
+    pub async fn migrate_to_archive(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> {
        let archive_ns = Self::prefixed_namespace(namespace, "archive")?;
        // Wipe any existing archive, before archiving this store.
        // We don't want to keep too much old data. See issue: #1088.
        let archive_store = KeyValueStore::create(storage_uri, &archive_ns)?;
-        archive_store.wipe()?;
+        archive_store.wipe().await?;

        match self {
            KeyValueStore::Memory(memory) => memory.migrate_namespace(archive_ns),
@@ -205,9 +217,9 @@ impl KeyValueStore {
    /// Make this (upgrade) store the current store.
    ///
    /// Fails if there is a non-empty current store.
-    pub fn migrate_to_current(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> {
+    pub async fn migrate_to_current(&mut self, storage_uri: &Url, namespace: &Namespace) -> Result<(), KeyValueError> {
        let current_store = KeyValueStore::create(storage_uri, namespace)?;
-        if !current_store.is_empty()? {
+        if !current_store.is_empty().await? {
            Err(KeyValueError::Other(format!(
                "Abort migrate upgraded store for {} to current. The current store was not archived.",
                namespace
@@ -227,26 +239,27 @@ impl KeyValueStore {
    /// currently not supported. This should be okay, because this function
    /// is intended to be used for migrations and testing (copy test data
    /// into a store) while Krill is not running.
-    pub fn import(&self, other: &Self) -> Result<(), KeyValueError> {
+    pub async fn import(&self, other: &Self) -> Result<(), KeyValueError> {
        debug!("Import keys from {} into {}", other, self);

-        let mut scopes = other.scopes()?;
+        let mut scopes = other.scopes().await?;
        scopes.push(Scope::global()); // not explicitly listed but should be migrated as well.

        for scope in scopes {
-            let key_value_pairs = other.key_value_pairs(&scope, "")?;
+            let key_value_pairs = other.key_value_pairs(&scope, "").await?;
            trace!(
                "Migrating {} key value pairs in scope {}.",
                key_value_pairs.len(),
                scope
            );

-            self.execute(&scope, |kv| {
+            self.execute(&scope, |kv| async move {
                for (key, value) in key_value_pairs.into_iter() {
                    trace!(" ---storing key {}", key);
                    kv.store(&key, value)?;
                }
                Ok(())
-            })?;
+            })
+            .await?;
        }

        Ok(())
@@ -264,20 +277,21 @@ impl fmt::Display for KeyValueStore {

//------------ KeyValueStoreDispatcher ---------------------------------------

-#[derive(Debug)]
-pub enum KeyValueStoreDispatcher<'a> {
-    Memory(&'a Memory),
-    Disk(&'a Disk),
+#[derive(Clone, Debug)]
+pub enum KeyValueStoreDispatcher {
+    Memory(Memory),
+    Disk(Disk),
}

-impl<'a> KeyValueStoreDispatcher<'a> {
-    pub fn execute<F, T>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
+impl KeyValueStoreDispatcher {
+    pub async fn execute<F, T, Ret>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
    where
-        F: FnOnce(&KeyValueStoreDispatcher) -> Result<T, KeyValueError>,
+        F: FnOnce(KeyValueStoreDispatcher) -> Ret,
+        Ret: Future<Output = Result<T, KeyValueError>>,
    {
        match self {
-            KeyValueStoreDispatcher::Memory(memory) => memory.execute(scope, op),
-            KeyValueStoreDispatcher::Disk(disk) => disk.execute(scope, op),
+            KeyValueStoreDispatcher::Memory(memory) => memory.execute(scope, op).await,
+            KeyValueStoreDispatcher::Disk(disk) => disk.execute(scope, op).await,
        }
    }

@@ -361,7 +375,7 @@ impl<'a> KeyValueStoreDispatcher<'a> {
    }
}

-impl<'a> fmt::Display for KeyValueStoreDispatcher<'a> {
+impl fmt::Display for KeyValueStoreDispatcher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            KeyValueStoreDispatcher::Memory(memory) => memory.fmt(f),
@@ -466,144 +480,144 @@ mod tests {
        Key::new_scoped(random_scope(depth), random_segment())
    }

-    fn impl_store(store: KeyValueStore) {
+    async fn impl_store(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());

-        store.store(&key, &content).unwrap();
-        assert!(store.has(&key).unwrap());
-        assert_eq!(store.get(&key).unwrap(), Some(content));
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has(&key).await.unwrap());
+        assert_eq!(store.get(&key).await.unwrap(), Some(content));
    }

-    fn impl_store_new(store: KeyValueStore) {
+    async fn impl_store_new(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());

-        assert!(store.store_new(&key, &content).is_ok());
-        assert!(store.store_new(&key, &content).is_err());
+        assert!(store.store_new(&key, &content).await.is_ok());
+        assert!(store.store_new(&key, &content).await.is_err());
    }

-    fn impl_store_scoped(store: KeyValueStore) {
+    async fn impl_store_scoped(store: KeyValueStore) {
        let content = "content".to_owned();
        let id = random_segment();
        let scope = Scope::from_segment(SegmentBuf::parse_lossy("scope"));
        let key = Key::new_scoped(scope.clone(), id.clone());

-        store.store(&key, &content).unwrap();
-        assert!(store.has(&key).unwrap());
-        assert_eq!(store.get(&key).unwrap(), Some(content.clone()));
-        assert!(store.has_scope(&scope).unwrap());
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has(&key).await.unwrap());
+        assert_eq!(store.get(&key).await.unwrap(), Some(content.clone()));
+        assert!(store.has_scope(&scope).await.unwrap());

        let simple = Key::new_global(id);
-        store.store(&simple, &content).unwrap();
-        assert!(store.has(&simple).unwrap());
-        assert_eq!(store.get(&simple).unwrap(), Some(content));
+        store.store(&simple, &content).await.unwrap();
+        assert!(store.has(&simple).await.unwrap());
+        assert_eq!(store.get(&simple).await.unwrap(), Some(content));
    }

-    fn impl_get(store: KeyValueStore) {
+    async fn impl_get(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());
-        assert_eq!(store.get::<String>(&key).unwrap(), None);
+        assert_eq!(store.get::<String>(&key).await.unwrap(), None);

-        store.store(&key, &content).unwrap();
-        assert_eq!(store.get(&key).unwrap(), Some(content));
+        store.store(&key, &content).await.unwrap();
+        assert_eq!(store.get(&key).await.unwrap(), Some(content));
    }

-    fn impl_get_transactional(store: KeyValueStore) {
+    async fn impl_get_transactional(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());
-        assert_eq!(store.get::<String>(&key).unwrap(), None);
+        assert_eq!(store.get::<String>(&key).await.unwrap(), None);

-        store.store(&key, &content).unwrap();
-        assert_eq!(store.get(&key).unwrap(), Some(content));
+        store.store(&key, &content).await.unwrap();
+        assert_eq!(store.get(&key).await.unwrap(), Some(content));
    }

-    fn impl_has(store: KeyValueStore) {
+    async fn impl_has(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());
-        assert!(!store.has(&key).unwrap());
+        assert!(!store.has(&key).await.unwrap());

-        store.store(&key, &content).unwrap();
-        assert!(store.has(&key).unwrap());
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has(&key).await.unwrap());
    }

-    fn impl_drop_key(store: KeyValueStore) {
+    async fn impl_drop_key(store: KeyValueStore) {
        let content = "content".to_owned();
        let key = Key::new_global(random_segment());
-        store.store(&key, &content).unwrap();
-        assert!(store.has(&key).unwrap());
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has(&key).await.unwrap());

-        store.drop_key(&key).unwrap();
-        assert!(!store.has(&key).unwrap());
+        store.drop_key(&key).await.unwrap();
+        assert!(!store.has(&key).await.unwrap());
    }

-    fn impl_drop_scope(store: KeyValueStore) {
+    async fn impl_drop_scope(store: KeyValueStore) {
        let content = "content".to_owned();
        let scope = Scope::from_segment(random_segment());
        let key = Key::new_scoped(scope.clone(), random_segment());
        let key2 = Key::new_scoped(Scope::from_segment(random_segment()), random_segment());
-        store.store(&key, &content).unwrap();
-        store.store(&key2, &content).unwrap();
-        assert!(store.has_scope(&scope).unwrap());
-        assert!(store.has(&key).unwrap());
-        assert!(store.has(&key2).unwrap());
+        store.store(&key, &content).await.unwrap();
+        store.store(&key2, &content).await.unwrap();
+        assert!(store.has_scope(&scope).await.unwrap());
+        assert!(store.has(&key).await.unwrap());
+        assert!(store.has(&key2).await.unwrap());

-        store.drop_scope(&scope).unwrap();
-        assert!(!store.has_scope(&scope).unwrap());
-        assert!(!store.has(&key).unwrap());
-        assert!(store.has(&key2).unwrap());
+        store.drop_scope(&scope).await.unwrap();
+        assert!(!store.has_scope(&scope).await.unwrap());
+        assert!(!store.has(&key).await.unwrap());
+        assert!(store.has(&key2).await.unwrap());
    }

-    fn impl_wipe(store: KeyValueStore) {
+    async fn impl_wipe(store: KeyValueStore) {
        let content = "content".to_owned();
        let scope = Scope::from_segment(SegmentBuf::parse_lossy("scope"));
        let key = Key::new_scoped(scope.clone(), random_segment());

-        store.store(&key, &content).unwrap();
-        assert!(store.has_scope(&scope).unwrap());
-        assert!(store.has(&key).unwrap());
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has_scope(&scope).await.unwrap());
+        assert!(store.has(&key).await.unwrap());

-        store.wipe().unwrap();
-        assert!(!store.has_scope(&scope).unwrap());
-        assert!(!store.has(&key).unwrap());
-        assert!(store.keys(&Scope::global(), "").unwrap().is_empty());
+        store.wipe().await.unwrap();
+        assert!(!store.has_scope(&scope).await.unwrap());
+        assert!(!store.has(&key).await.unwrap());
+        assert!(store.keys(&Scope::global(), "").await.unwrap().is_empty());
    }

-    fn impl_list_scopes(store: KeyValueStore) {
+    async fn impl_list_scopes(store: KeyValueStore) {
        let content = "content".to_owned();
        let id = SegmentBuf::parse_lossy("id");
        let scope = Scope::from_segment(random_segment());
        let key = Key::new_scoped(scope.clone(), id.clone());

-        assert!(store.scopes().unwrap().is_empty());
+        assert!(store.scopes().await.unwrap().is_empty());

-        store.store(&key, &content).unwrap();
-        assert_eq!(store.scopes().unwrap(), [scope.clone()]);
+        store.store(&key, &content).await.unwrap();
+        assert_eq!(store.scopes().await.unwrap(), [scope.clone()]);

        let scope2 = Scope::from_segment(random_segment());
        let key2 = Key::new_scoped(scope2.clone(), id);
-        store.store(&key2, &content).unwrap();
+        store.store(&key2, &content).await.unwrap();

-        let mut scopes = store.scopes().unwrap();
+        let mut scopes = store.scopes().await.unwrap();
        scopes.sort();
        let mut expected = vec![scope.clone(), scope2.clone()];
        expected.sort();
        assert_eq!(scopes, expected);

-        store.drop_scope(&scope2).unwrap();
-        assert_eq!(store.scopes().unwrap(), vec![scope]);
+        store.drop_scope(&scope2).await.unwrap();
+        assert_eq!(store.scopes().await.unwrap(), vec![scope]);
    }

-    fn impl_has_scope(store: KeyValueStore) {
+    async fn impl_has_scope(store: KeyValueStore) {
        let content = "content".to_owned();
        let scope = Scope::from_segment(random_segment());
        let key = Key::new_scoped(scope.clone(), SegmentBuf::parse_lossy("id"));
-        assert!(!store.has_scope(&scope).unwrap());
+        assert!(!store.has_scope(&scope).await.unwrap());

-        store.store(&key, &content).unwrap();
-        assert!(store.has_scope(&scope).unwrap());
+        store.store(&key, &content).await.unwrap();
+        assert!(store.has_scope(&scope).await.unwrap());
    }

-    fn impl_list_keys(store: KeyValueStore) {
+    async fn impl_list_keys(store: KeyValueStore) {
        let content = "content".to_owned();
        let id = SegmentBuf::parse_lossy("command--id");
        let scope = Scope::from_segment(SegmentBuf::parse_lossy("command"));
@@ -614,21 +628,21 @@ mod tests {
        let key2 = Key::new_scoped(scope.clone(), id2.clone());
        let key3 = Key::new_global(id3.clone());

-        store.store(&key, &content).unwrap();
-        store.store(&key2, &content).unwrap();
-        store.store(&key3, &content).unwrap();
+        store.store(&key, &content).await.unwrap();
+        store.store(&key2, &content).await.unwrap();
+        store.store(&key3, &content).await.unwrap();

-        let mut keys = store.keys(&scope, "command--").unwrap();
+        let mut keys = store.keys(&scope, "command--").await.unwrap();
        keys.sort();
        let mut expected = vec![key.clone(), key2.clone()];
        expected.sort();

        assert_eq!(keys, expected);

-        assert_eq!(store.keys(&scope, id2.as_str()).unwrap(), [key2.clone()]);
-        assert_eq!(store.keys(&scope, id3.as_str()).unwrap(), []);
-        assert_eq!(store.keys(&Scope::global(), id3.as_str()).unwrap(), [key3]);
+        assert_eq!(store.keys(&scope, id2.as_str()).await.unwrap(), [key2.clone()]);
+        assert_eq!(store.keys(&scope, id3.as_str()).await.unwrap(), []);
+        assert_eq!(store.keys(&Scope::global(), id3.as_str()).await.unwrap(), [key3]);

-        let mut keys = store.keys(&scope, "").unwrap();
+        let mut keys = store.keys(&scope, "").await.unwrap();
        keys.sort();
        let mut expected = vec![key, key2];
        expected.sort();
@@ -636,11 +650,11 @@ mod tests {
        assert_eq!(keys, expected);
    }

-    fn impl_is_empty(store: KeyValueStore) {
-        assert!(store.is_empty().unwrap());
-        store.store(&random_key(1), &random_value(8)).unwrap();
+    async fn impl_is_empty(store: KeyValueStore) {
+        assert!(store.is_empty().await.unwrap());
+        store.store(&random_key(1), &random_value(8)).await.unwrap();

-        assert!(!store.is_empty().unwrap());
+        assert!(!store.is_empty().await.unwrap());
    }

    async fn impl_execute(store: KeyValueStore) {
@@ -659,35 +673,39 @@ mod tests {

        store
            .execute(&scope, |kv| {
-                // start with an empty kv
-                assert!(kv.is_empty().unwrap());
+                let scope = scope.clone();
+                async move {
+                    // start with an empty kv
+                    assert!(kv.is_empty().unwrap());

-                // add a bunch of keys, see that they are there
-                // and nothing else
-                let mut keys: Vec<Key> = (0..8).map(|_| random_key(1)).collect();
-                keys.sort();
+                    // add a bunch of keys, see that they are there
+                    // and nothing else
+                    let mut keys: Vec<Key> = (0..8).map(|_| random_key(1)).collect();
+                    keys.sort();

-                for key in &keys {
-                    kv.store(key, random_value(8)).unwrap();
-                }
-                assert!(!kv.is_empty().unwrap());
+                    for key in &keys {
+                        kv.store(key, random_value(8)).unwrap();
+                    }
+                    assert!(!kv.is_empty().unwrap());

-                // TODO: use non-blocking sleep when we have an async closure
-                std::thread::sleep(std::time::Duration::from_millis(200));
+                    // TODO: use non-blocking sleep when we have an async closure
+                    std::thread::sleep(std::time::Duration::from_millis(200));

-                let mut stored_keys = kv.list_keys(&scope).unwrap();
-                stored_keys.sort();
+                    let mut stored_keys = kv.list_keys(&scope).unwrap();
+                    stored_keys.sort();

-                assert_eq!(keys.len(), stored_keys.len());
-                assert_eq!(keys, stored_keys);
+                    assert_eq!(keys.len(), stored_keys.len());
+                    assert_eq!(keys, stored_keys);

-                for key in &keys {
-                    kv.delete(key).unwrap();
-                }
-                assert!(kv.is_empty().unwrap());
+                    for key in &keys {
+                        kv.delete(key).unwrap();
+                    }
+                    assert!(kv.is_empty().unwrap());

-                Ok(())
+                    Ok(())
+                }
            })
+            .await
            .unwrap();
    }

@@ -702,19 +720,19 @@ mod tests {
    }

    async fn test_impl(storage_uri: Url) {
-        impl_store(test_store(&storage_uri));
-        impl_store_new(test_store(&storage_uri));
-        impl_store_scoped(test_store(&storage_uri));
-        impl_get(test_store(&storage_uri));
-        impl_get_transactional(test_store(&storage_uri));
-        impl_has(test_store(&storage_uri));
-        impl_drop_key(test_store(&storage_uri));
-        impl_drop_scope(test_store(&storage_uri));
-        impl_wipe(test_store(&storage_uri));
-        impl_list_scopes(test_store(&storage_uri));
-        impl_has_scope(test_store(&storage_uri));
-        impl_list_keys(test_store(&storage_uri));
-        impl_is_empty(test_store(&storage_uri));
+        impl_store(test_store(&storage_uri)).await;
+        impl_store_new(test_store(&storage_uri)).await;
+        impl_store_scoped(test_store(&storage_uri)).await;
+        impl_get(test_store(&storage_uri)).await;
+        impl_get_transactional(test_store(&storage_uri)).await;
+        impl_has(test_store(&storage_uri)).await;
+        impl_drop_key(test_store(&storage_uri)).await;
+        impl_drop_scope(test_store(&storage_uri)).await;
+        impl_wipe(test_store(&storage_uri)).await;
+        impl_list_scopes(test_store(&storage_uri)).await;
+        impl_has_scope(test_store(&storage_uri)).await;
+        impl_list_keys(test_store(&storage_uri)).await;
+        impl_is_empty(test_store(&storage_uri)).await;

        impl_execute(test_store(&storage_uri)).await;
    }
diff --git a/src/commons/storage/memory.rs b/src/commons/storage/memory.rs
index 8949609b7..18b6b21fb 100644
--- a/src/commons/storage/memory.rs
+++ b/src/commons/storage/memory.rs
@@ -5,6 +5,7 @@ use std::{
    sync::{Mutex, MutexGuard},
};

+use futures_util::Future;
use lazy_static::lazy_static;

use crate::commons::storage::{Key, KeyValueError, NamespaceBuf, Scope, StorageResult};
@@ -181,9 +182,10 @@ impl Display for Memory {
}

impl Memory {
-    pub fn execute<F, T>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
+    pub async fn execute<'f, F, T, Ret>(&self, scope: &Scope, op: F) -> Result<T, KeyValueError>
    where
-        F: FnOnce(&KeyValueStoreDispatcher) -> Result<T, KeyValueError>,
+        F: FnOnce(KeyValueStoreDispatcher) -> Ret,
+        Ret: Future<Output = Result<T, KeyValueError>>,
    {
        // fn transaction(&self, scope: &Scope, callback: TransactionCallback) -> Result<()> {
        // Try to get a lock for 10 seconds. We may need to make this configurable.
@@ -213,8 +215,8 @@ impl Memory {
            }
        }

-        let dispatcher = KeyValueStoreDispatcher::Memory(self);
-        let res = op(&dispatcher);
+        let dispatcher = KeyValueStoreDispatcher::Memory(self.clone());
+        let res = op(dispatcher).await;

        let mut locks = self
            .locks
diff --git a/src/commons/storage/queue.rs b/src/commons/storage/queue.rs
index dafe9a2dd..c82303fe2 100644
--- a/src/commons/storage/queue.rs
+++ b/src/commons/storage/queue.rs
@@ -8,9 +8,8 @@ use std::{
};

use crate::commons::storage::{Key, KeyValueError, KeyValueStore, Scope, Segment, SegmentBuf, StorageResult};

-use super::KeyValueStoreDispatcher;
-
const SEPARATOR: char = '-';
+const RESCHEDULE_AFTER: Duration = Duration::from_secs(15 * 60);

fn now() -> u128 {
    SystemTime::now()
@@ -163,8 +162,15 @@ pub enum ScheduleMode {
    IfMissing,
}

-pub trait Queue {
-    const RESCHEDULE_AFTER: Duration = Duration::from_secs(15 * 60);
+#[derive(Debug)]
+pub struct Queue {
+    kv: KeyValueStore,
+}
+
+impl Queue {
+    pub fn new(kv: KeyValueStore) -> Self {
+        Queue { kv }
+    }

    fn lock_scope() -> Scope {
        Scope::global()
@@ -178,58 +184,41 @@ pub trait Queue {
        Scope::from_segment(RunningTask::SEGMENT)
    }

-    /// Returns the number of pending tasks remaining
-    fn pending_tasks_remaining(&self) -> StorageResult<usize>;
-
-    /// Returns the number of running tasks
-    fn running_tasks_remaining(&self) -> StorageResult<usize>;
-
-    /// Returns the currently running tasks
-    fn running_tasks_keys(&self) -> StorageResult<Vec<Key>>;
-
-    /// Schedule a task.
-    fn schedule_task(
-        &self,
-        name: SegmentBuf,
-        value: serde_json::Value,
-        timestamp_millis: Option<u128>,
-        existing: ScheduleMode,
-    ) -> StorageResult<()>;
-
-    /// Returns the scheduled timestamp in ms for the named task, if any.
-    fn pending_task_scheduled(&self, name: SegmentBuf) -> StorageResult<Option<u128>>;
-
-    /// Marks a running task as finished. Fails if the task is not running.
-    fn finish_running_task(&self, running: &Key) -> StorageResult<()>;
-
-    /// Reschedules a running task as pending. Fails if the task is not running.
-    fn reschedule_running_task(&self, running: &Key, timestamp_millis: Option<u128>) -> StorageResult<()>;
-
-    /// Claims the next scheduled pending task, if any.
-    fn claim_scheduled_pending_task(&self) -> StorageResult<Option<RunningTask>>;
-
-    /// Reschedules running tasks that have timed out.
-    fn reschedule_long_running_tasks(&self, reschedule_after: Option<&Duration>) -> StorageResult<()>;
-}
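The change above replaces the `Queue` trait, which was implemented directly on `KeyValueStore`, with a standalone struct that owns its store, so task-queue behaviour is no longer mixed into the general key-value API; `RESCHEDULE_AFTER` likewise moves from an associated const to a module-level const. Construction becomes explicit, roughly as follows (the namespace name is illustrative):

```rust
// Sketch: building the new Queue type around a KeyValueStore.
let kv = KeyValueStore::create(&storage_uri, Namespace::parse("tasks").unwrap())?;
let queue = Queue::new(kv);
assert_eq!(queue.pending_tasks_remaining().await?, 0); // fresh store, nothing pending
```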
+    /// Wipe the entire queue. Handle with care.
+    pub async fn wipe(&self) -> StorageResult<()> {
+        self.kv.wipe().await
+    }

-impl Queue for KeyValueStore {
-    fn pending_tasks_remaining(&self) -> StorageResult<usize> {
-        self.execute(&Self::lock_scope(), |kv| {
-            kv.list_keys(&Self::pending_scope()).map(|list| list.len())
-        })
+    /// Returns the number of pending tasks remaining
+    pub async fn pending_tasks_remaining(&self) -> StorageResult<usize> {
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                kv.list_keys(&Self::pending_scope()).map(|list| list.len())
+            })
+            .await
    }

-    fn running_tasks_remaining(&self) -> StorageResult<usize> {
-        self.execute(&Self::lock_scope(), |kv| {
-            kv.list_keys(&Self::running_scope()).map(|list| list.len())
-        })
+    /// Returns the number of running tasks
+    pub async fn running_tasks_remaining(&self) -> StorageResult<usize> {
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                kv.list_keys(&Self::running_scope()).map(|list| list.len())
+            })
+            .await
    }

-    fn running_tasks_keys(&self) -> StorageResult<Vec<Key>> {
-        self.execute(&Self::lock_scope(), |kv| kv.list_keys(&Self::running_scope()))
+    /// Returns the currently running tasks
+    pub async fn running_tasks_keys(&self) -> StorageResult<Vec<Key>> {
+        self.kv
+            .execute(
+                &Self::lock_scope(),
+                |kv| async move { kv.list_keys(&Self::running_scope()) },
+            )
+            .await
    }

-    fn schedule_task(
+    /// Schedule a task.
+    pub async fn schedule_task(
        &self,
        name: SegmentBuf,
        value: serde_json::Value,
@@ -243,90 +232,110 @@
        };
        let new_task_key = Key::from(&new_task);

-        self.execute(&Self::lock_scope(), |s: &KeyValueStoreDispatcher| {
-            let running_key_opt = s
-                .list_keys(&Self::running_scope())?
-                .into_iter()
-                .filter_map(|k| TaskKey::try_from(&k).ok())
-                .find(|running| running.name.as_ref() == &new_task.name)
-                .map(|tk| tk.running_key());
-
-            let pending_key_opt = s
-                .list_keys(&Self::pending_scope())?
-                .into_iter()
-                .filter_map(|k| TaskKey::try_from(&k).ok())
-                .find(|p| p.name.as_ref() == &new_task.name)
-                .map(|tk| tk.pending_key());
-
-            match mode {
-                ScheduleMode::IfMissing => {
-                    if pending_key_opt.is_some() || running_key_opt.is_some() {
-                        // nothing to do, there is something
-                        Ok(())
-                    } else {
-                        // no pending or running task exists, just add the new task
-                        s.store(&new_task_key, new_task.value.clone())
-                    }
-                }
-                ScheduleMode::ReplaceExisting => {
-                    if let Some(pending) = pending_key_opt {
-                        s.delete(&pending)?;
-                    }
-                    s.store(&new_task_key, new_task.value.clone())
-                }
-                ScheduleMode::ReplaceExistingSoonest => {
-                    if let Some(pending) = pending_key_opt {
-                        if let Ok(tk) = TaskKey::try_from(&pending) {
-                            new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
-                        }
-                        s.delete(&pending)?;
-                    }
-
-                    let new_task_key = Key::from(&new_task);
-                    s.store(&new_task_key, new_task.value.clone())
-                }
-                ScheduleMode::FinishOrReplaceExisting => {
-                    if let Some(running) = running_key_opt {
-                        s.delete(&running)?;
-                    }
-                    if let Some(pending) = pending_key_opt {
-                        s.delete(&pending)?;
-                    }
-                    s.store(&new_task_key, new_task.value.clone())
-                }
-                ScheduleMode::FinishOrReplaceExistingSoonest => {
-                    if let Some(running) = running_key_opt {
-                        s.delete(&running)?;
-                    }
-
-                    if let Some(pending) = pending_key_opt {
-                        if let Ok(tk) = TaskKey::try_from(&pending) {
-                            new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
-                        }
-                        s.delete(&pending)?;
-                    }
-
-                    let new_task_key = Key::from(&new_task);
-                    s.store(&new_task_key, new_task.value.clone())
-                }
-            }
-        })
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                let running_key_opt = kv
+                    .list_keys(&Self::running_scope())?
+                    .into_iter()
+                    .filter_map(|k| TaskKey::try_from(&k).ok())
+                    .find(|running| running.name.as_ref() == &new_task.name)
+                    .map(|tk| tk.running_key());
+
+                let pending_key_opt = kv
+                    .list_keys(&Self::pending_scope())?
+                    .into_iter()
+                    .filter_map(|k| TaskKey::try_from(&k).ok())
+                    .find(|p| p.name.as_ref() == &new_task.name)
+                    .map(|tk| tk.pending_key());
+
+                match mode {
+                    ScheduleMode::IfMissing => {
+                        if pending_key_opt.is_some() || running_key_opt.is_some() {
+                            // nothing to do, there is something
+                            Ok(())
+                        } else {
+                            // no pending or running task exists, just add the new task
+                            kv.store(&new_task_key, new_task.value.clone())
+                        }
+                    }
+                    ScheduleMode::ReplaceExisting => {
+                        if let Some(pending) = pending_key_opt {
+                            kv.delete(&pending)?;
+                        }
+                        kv.store(&new_task_key, new_task.value.clone())
+                    }
+                    ScheduleMode::ReplaceExistingSoonest => {
+                        if let Some(pending) = pending_key_opt {
+                            if let Ok(tk) = TaskKey::try_from(&pending) {
+                                new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
+                            }
+                            kv.delete(&pending)?;
+                        }
+
+                        let new_task_key = Key::from(&new_task);
+                        kv.store(&new_task_key, new_task.value.clone())
+                    }
+                    ScheduleMode::FinishOrReplaceExisting => {
+                        if let Some(running) = running_key_opt {
+                            kv.delete(&running)?;
+                        }
+                        if let Some(pending) = pending_key_opt {
+                            kv.delete(&pending)?;
+                        }
+                        kv.store(&new_task_key, new_task.value.clone())
+                    }
+                    ScheduleMode::FinishOrReplaceExistingSoonest => {
+                        if let Some(running) = running_key_opt {
+                            kv.delete(&running)?;
+                        }
+
+                        if let Some(pending) = pending_key_opt {
+                            if let Ok(tk) = TaskKey::try_from(&pending) {
+                                new_task.timestamp_millis = new_task.timestamp_millis.min(tk.timestamp_millis);
+                            }
+                            kv.delete(&pending)?;
+                        }
+
+                        let new_task_key = Key::from(&new_task);
+                        kv.store(&new_task_key, new_task.value.clone())
+                    }
+                }
+            })
+            .await
    }

-    fn finish_running_task(&self, running_key: &Key) -> StorageResult<()> {
-        self.execute(&Self::lock_scope(), |kv| {
-            if kv.has(running_key)? {
-                kv.delete(running_key)
-            } else {
-                Err(KeyValueError::Other(format!(
-                    "Cannot finish task {}. It is not running.",
-                    running_key
-                )))
-            }
-        })
+    /// Returns the scheduled timestamp in ms for the named task, if any.
+    pub async fn pending_task_scheduled(&self, name: SegmentBuf) -> StorageResult<Option<u128>> {
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                kv.list_keys(&Self::pending_scope()).map(|keys| {
+                    keys.into_iter()
+                        .filter_map(|k| TaskKey::try_from(&k).ok())
+                        .find(|p| p.name.as_ref() == &name)
+                        .map(|p| p.timestamp_millis)
+                })
+            })
+            .await
+    }
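The `match mode { .. }` block above carries the whole scheduling contract: `IfMissing` leaves any existing pending or running task alone, the `ReplaceExisting*` variants drop the pending entry, the `FinishOrReplace*` variants additionally drop the running entry, and the `*Soonest` variants keep the earlier of the old and new timestamps. A small illustration under those semantics (the task name and values are made up):

```rust
// Sketch: the second call is a no-op because a pending task with this
// name already exists and the mode is IfMissing.
let name = SegmentBuf::parse_lossy("refresh");
queue
    .schedule_task(name.clone(), Value::from("v1"), None, ScheduleMode::FinishOrReplaceExisting)
    .await?;
queue
    .schedule_task(name.clone(), Value::from("v2"), None, ScheduleMode::IfMissing)
    .await?;
assert_eq!(queue.pending_tasks_remaining().await?, 1); // still the "v1" task
```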
+    /// Marks a running task as finished. Fails if the task is not running.
+    pub async fn finish_running_task(&self, running: &Key) -> StorageResult<()> {
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                if kv.has(running)? {
+                    kv.delete(running)
+                } else {
+                    Err(KeyValueError::Other(format!(
+                        "Cannot finish task {}. It is not running.",
+                        running
+                    )))
+                }
+            })
+            .await
    }

-    fn reschedule_running_task(&self, running: &Key, timestamp_millis: Option<u128>) -> StorageResult<()> {
+    /// Reschedules a running task as pending. Fails if the task is not running.
+    pub async fn reschedule_running_task(&self, running: &Key, timestamp_millis: Option<u128>) -> StorageResult<()> {
        let pending_key = {
            let mut task_key = TaskKey::try_from(running)?;
            task_key.timestamp_millis = timestamp_millis.unwrap_or_else(now);
@@ -334,161 +343,146 @@
            task_key.pending_key()
        };

-        self.execute(&Self::lock_scope(), |kv| kv.move_value(running, &pending_key))
+        self.kv
+            .execute(
+                &Self::lock_scope(),
+                |kv| async move { kv.move_value(running, &pending_key) },
+            )
+            .await
    }

-    fn claim_scheduled_pending_task(&self) -> StorageResult<Option<RunningTask>> {
-        self.execute(&Self::lock_scope(), |kv| {
-            let tasks_before = now();
-
-            if let Some(pending) = kv
-                .list_keys(&Self::pending_scope())?
-                .into_iter()
-                .filter_map(|k| TaskKey::try_from(&k).ok())
-                .filter(|tk| tk.timestamp_millis <= tasks_before)
-                .min_by_key(|tk| tk.timestamp_millis)
-            {
-                let pending_key = pending.pending_key();
-
-                if let Some(value) = kv.get(&pending_key)? {
-                    let mut running_task = RunningTask {
-                        name: pending.name.into_owned(),
-                        timestamp_millis: tasks_before,
-                        value,
-                    };
-                    let mut running_key = Key::from(&running_task);
-
-                    if kv.has(&running_key)? {
-                        // It's not pretty to sleep blocking, even if it's
-                        // for 1 ms, but if we don't then we get a name collision
-                        // with an existing running task.
-                        std::thread::sleep(Duration::from_millis(1));
-                        running_task.timestamp_millis = now();
-                        running_key = Key::from(&running_task);
-                    }
-
-                    kv.move_value(&pending_key, &running_key)?;
-
-                    Ok(Some(running_task))
-                } else {
-                    Ok(None)
-                }
-            } else {
-                Ok(None)
-            }
-        })
+    /// Claims the next scheduled pending task, if any.
+    pub async fn claim_scheduled_pending_task(&self) -> StorageResult<Option<RunningTask>> {
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                let tasks_before = now();
+
+                if let Some(pending) = kv
+                    .list_keys(&Self::pending_scope())?
+                    .into_iter()
+                    .filter_map(|k| TaskKey::try_from(&k).ok())
+                    .filter(|tk| tk.timestamp_millis <= tasks_before)
+                    .min_by_key(|tk| tk.timestamp_millis)
+                {
+                    let pending_key = pending.pending_key();
+
+                    if let Some(value) = kv.get(&pending_key)? {
+                        let mut running_task = RunningTask {
+                            name: pending.name.into_owned(),
+                            timestamp_millis: tasks_before,
+                            value,
+                        };
+                        let mut running_key = Key::from(&running_task);
+
+                        if kv.has(&running_key)? {
+                            // It's not pretty to sleep blocking, even if it's
+                            // for 1 ms, but if we don't then we get a name collision
+                            // with an existing running task.
+                            std::thread::sleep(Duration::from_millis(1));
+                            running_task.timestamp_millis = now();
+                            running_key = Key::from(&running_task);
+                        }

-                        kv.move_value(&pending_key, &running_key)?;
+                        kv.move_value(&pending_key, &running_key)?;

-                        Ok(Some(running_task))
+                        Ok(Some(running_task))
+                    } else {
+                        Ok(None)
+                    }
                } else {
                    Ok(None)
                }
-        })
+            })
+            .await
    }

-    fn reschedule_long_running_tasks(&self, reschedule_after: Option<&Duration>) -> StorageResult<()> {
+    /// Reschedules running tasks that have timed out.
+    pub async fn reschedule_long_running_tasks(&self, reschedule_after: Option<&Duration>) -> StorageResult<()> {
        let now = now();

-        let reschedule_after = reschedule_after.unwrap_or(&KeyValueStore::RESCHEDULE_AFTER);
+        let reschedule_after = reschedule_after.unwrap_or(&RESCHEDULE_AFTER);
        let reschedule_timeout = now - reschedule_after.as_millis();

-        self.execute(&Self::lock_scope(), |s: &KeyValueStoreDispatcher| {
-            s.list_keys(&Self::running_scope())?
-                .into_iter()
-                .filter_map(|k| {
-                    let task = TaskKey::try_from(&k).ok()?;
-                    if task.timestamp_millis <= reschedule_timeout {
-                        Some(task)
-                    } else {
-                        None
-                    }
-                })
-                .for_each(|tk| {
-                    let running_key = tk.running_key();
-
-                    let pending_key = TaskKey {
-                        name: Cow::Borrowed(&tk.name),
-                        timestamp_millis: now,
-                    }
-                    .pending_key();
+        self.kv
+            .execute(&Self::lock_scope(), |kv| async move {
+                kv.list_keys(&Self::running_scope())?
+                    .into_iter()
+                    .filter_map(|k| {
+                        let task = TaskKey::try_from(&k).ok()?;
+                        if task.timestamp_millis <= reschedule_timeout {
+                            Some(task)
+                        } else {
+                            None
+                        }
+                    })
+                    .for_each(|tk| {
+                        let running_key = tk.running_key();

-                    let _ = s.move_value(&running_key, &pending_key);
-                });
+                        let pending_key = TaskKey {
+                            name: Cow::Borrowed(&tk.name),
+                            timestamp_millis: now,
+                        }
+                        .pending_key();

-            Ok(())
-        })
-    }
+                        let _ = kv.move_value(&running_key, &pending_key);
+                    });

-    fn pending_task_scheduled(&self, name: SegmentBuf) -> StorageResult<Option<u128>> {
-        self.execute(&Self::lock_scope(), |kv| {
-            kv.list_keys(&Self::pending_scope()).map(|keys| {
-                keys.into_iter()
-                    .filter_map(|k| TaskKey::try_from(&k).ok())
-                    .find(|p| p.name.as_ref() == &name)
-                    .map(|p| p.timestamp_millis)
+                Ok(())
            })
-        })
+            .await
    }
}

#[cfg(test)]
mod tests {

-    use std::{thread, time::Duration};
+    use std::time::Duration;

+    use futures_util::future::join_all;
    use serde_json::Value;

-    use super::{PendingTask, Queue};
+    use super::Queue;

    use crate::commons::storage::{
        queue::{now, ScheduleMode},
-        Key, KeyValueStore, Namespace, Scope, Segment, SegmentBuf,
+        Key, KeyValueStore, Namespace, Segment, SegmentBuf,
    };

-    fn queue_store(ns: &str) -> KeyValueStore {
+    fn queue_store(ns: &str) -> Queue {
        let storage_url = crate::test::mem_storage();

-        KeyValueStore::create(&storage_url, Namespace::parse(ns).unwrap()).unwrap()
+        Queue::new(KeyValueStore::create(&storage_url, Namespace::parse(ns).unwrap()).unwrap())
    }

-    #[test]
-    fn queue_thread_workers() {
+    #[tokio::test]
+    async fn queue_thread_workers() {
        let queue = queue_store("test_queue");
-        queue.wipe().unwrap();
-
-        thread::scope(|s| {
-            let create = s.spawn(|| {
-                for i in 1..=10 {
-                    let name = &format!("job-{i}");
-                    let segment = Segment::parse(name).unwrap();
-                    let value = Value::from("value");
-
-                    queue
-                        .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting)
-                        .unwrap();
-                    println!("> Scheduled job {}", &name);
-                }
-            });
+        queue.wipe().await.unwrap();

-            create.join().unwrap();
-            let keys = queue.list_keys(&Scope::from_segment(PendingTask::SEGMENT)).unwrap();
-            assert_eq!(keys.len(), 10);
+        async fn schedule(queue: &Queue, job_nr: usize) {
+            let name = &format!("job-{job_nr}");
+            let segment = Segment::parse(name).unwrap();
+            let value = Value::from("value");

-            for _i in 1..=10 {
-                s.spawn(|| {
-                    while queue.pending_tasks_remaining().unwrap() > 0 {
-                        if let Some(running_task) = queue.claim_scheduled_pending_task().unwrap() {
-                            queue.finish_running_task(&Key::from(&running_task)).unwrap();
-                        }
+            queue
+                .schedule_task(segment.into(), value, None,
ScheduleMode::FinishOrReplaceExisting) + .await + .unwrap(); + } - std::thread::sleep(std::time::Duration::from_millis(5)); - } - }); - } - }); + let schedule_jobs: Vec<_> = (1..=10).map(|job_nr| schedule(&queue, job_nr)).collect(); - let pending = queue.pending_tasks_remaining().unwrap(); - assert_eq!(pending, 0); + join_all(schedule_jobs).await; - let running = queue.running_tasks_remaining().unwrap(); + let pending = queue.pending_tasks_remaining().await.unwrap(); + assert_eq!(pending, 10); + + let running = queue.running_tasks_remaining().await.unwrap(); assert_eq!(running, 0); } - #[test] - fn test_reschedule_long_running() { + #[tokio::test] + async fn test_reschedule_long_running() { let queue = queue_store("test_cleanup_queue"); - queue.wipe().unwrap(); + queue.wipe().await.unwrap(); let name = "job"; let segment = Segment::parse(name).unwrap(); @@ -496,38 +490,40 @@ mod tests { queue .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting) + .await .unwrap(); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1); - let job = queue.claim_scheduled_pending_task().unwrap(); + let job = queue.claim_scheduled_pending_task().await.unwrap(); assert!(job.is_some()); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 0); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0); - let job = queue.claim_scheduled_pending_task().unwrap(); + let job = queue.claim_scheduled_pending_task().await.unwrap(); assert!(job.is_none()); queue .reschedule_long_running_tasks(Some(&Duration::from_secs(0))) + .await .unwrap(); - let existing = queue.pending_task_scheduled(segment.into()).unwrap(); + let existing = queue.pending_task_scheduled(segment.into()).await.unwrap(); assert!(existing.is_some()); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1); - let job = queue.claim_scheduled_pending_task().unwrap(); + let job = queue.claim_scheduled_pending_task().await.unwrap(); assert!(job.is_some()); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 0); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0); } - #[test] - fn test_reschedule_finished_task() { + #[tokio::test] + async fn test_reschedule_finished_task() { let queue = queue_store("test_cleanup_queue"); - queue.wipe().unwrap(); + queue.wipe().await.unwrap(); let name = "task"; let segment = Segment::parse(name).unwrap(); @@ -536,13 +532,14 @@ mod tests { // Schedule the task queue .schedule_task(segment.into(), value, None, ScheduleMode::FinishOrReplaceExisting) + .await .unwrap(); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 1); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1); // Get the task - let running_task = queue.claim_scheduled_pending_task().unwrap().unwrap(); - assert_eq!(queue.pending_tasks_remaining().unwrap(), 0); - assert_eq!(queue.running_tasks_remaining().unwrap(), 1); + let running_task = queue.claim_scheduled_pending_task().await.unwrap().unwrap(); + assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0); + assert_eq!(queue.running_tasks_remaining().await.unwrap(), 1); // Finish the task and reschedule // queue.finish_running_task(task, Some(rescheduled)).unwrap(); @@ -553,26 +550,27 @@ mod tests { Some(now()), ScheduleMode::FinishOrReplaceExisting, ) + .await .unwrap(); // There should now be a new pending task, and the // running task should be removed. 
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
-        assert_eq!(queue.running_tasks_remaining().unwrap(), 0);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);
+        assert_eq!(queue.running_tasks_remaining().await.unwrap(), 0);

        // Get and finish the pending task, but do not reschedule it
-        let running_task = queue.claim_scheduled_pending_task().unwrap().unwrap();
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
-        queue.finish_running_task(&Key::from(&running_task)).unwrap();
+        let running_task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0);
+        queue.finish_running_task(&Key::from(&running_task)).await.unwrap();

        // There should not be a new pending task
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0);
    }

-    #[test]
-    fn test_schedule_with_existing_task() {
+    #[tokio::test]
+    async fn test_schedule_with_existing_task() {
        let queue = queue_store("test_cleanup_queue");
-        queue.wipe().unwrap();
+        queue.wipe().await.unwrap();

        let name: SegmentBuf = SegmentBuf::parse_lossy("task");
        let value_1 = Value::from("value_1");
@@ -589,8 +587,9 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);

        // Schedule again, replacing the existing task
        queue
@@ -600,15 +599,16 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);

        // We should have one task and the value should match the new task.
-        let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        let task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
        assert_eq!(task.value, value_2);

-        assert_eq!(queue.running_tasks_remaining().unwrap(), 1);
-        queue.finish_running_task(&Key::from(&task)).unwrap();
+        assert_eq!(queue.running_tasks_remaining().await.unwrap(), 1);
+        queue.finish_running_task(&Key::from(&task)).await.unwrap();
    }

    // Schedule a task, and then schedule again keeping the old
@@ -620,15 +620,18 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();
+
        queue
            .schedule_task(name.clone(), value_2.clone(), Some(in_a_while), ScheduleMode::IfMissing)
+            .await
            .unwrap();

        // there should be only one task, it should not be rescheduled,
        // so we get it and its value should match old.
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
-        let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);
+        let task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
        assert_eq!(task.value, value_1);
    }

@@ -641,10 +644,11 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();

        // we expect one pending task
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);

        // reschedule that task to 3 minutes from now, keeping the
        // soonest value
@@ -655,12 +659,13 @@
                Some(in_a_while),
                ScheduleMode::FinishOrReplaceExistingSoonest,
            )
+            .await
            .unwrap();

        // we still expect one pending task with the earlier
        // time and the new value.
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
-        let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);
+        let task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
        assert_eq!(task.value, value_2);

        // But if we now schedule a task and then reschedule
@@ -674,7 +679,9 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();
+
        queue
            .schedule_task(
                name.clone(),
@@ -682,10 +689,11 @@
                Some(in_a_while),
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();

-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
-        assert!(queue.claim_scheduled_pending_task().unwrap().is_none());
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);
+        assert!(queue.claim_scheduled_pending_task().await.unwrap().is_none());
    }

    // Schedule a task, claim it, and then finish and schedule a new task
@@ -698,17 +706,18 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();

        // there should be 1 pending task, and 0 running
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
-        assert_eq!(queue.running_tasks_remaining().unwrap(), 0);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);
+        assert_eq!(queue.running_tasks_remaining().await.unwrap(), 0);

        // claim the task
-        let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        let task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
        assert_eq!(task.value, value_1);
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 0);
-        assert_eq!(queue.running_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 0);
+        assert_eq!(queue.running_tasks_remaining().await.unwrap(), 1);

        // schedule a new task
        queue
            .schedule_task(
@@ -718,14 +727,15 @@
                None,
                ScheduleMode::FinishOrReplaceExisting,
            )
+            .await
            .unwrap();

        // the running task should now be finished, and there should be 1 new pending task
-        assert_eq!(queue.running_tasks_remaining().unwrap(), 0);
-        assert_eq!(queue.pending_tasks_remaining().unwrap(), 1);
+        assert_eq!(queue.running_tasks_remaining().await.unwrap(), 0);
+        assert_eq!(queue.pending_tasks_remaining().await.unwrap(), 1);

        // claim the task, it should match the new task
-        let task = queue.claim_scheduled_pending_task().unwrap().unwrap();
+        let task = queue.claim_scheduled_pending_task().await.unwrap().unwrap();
        assert_eq!(task.value, value_2);
    }
}
diff --git a/src/daemon/auth/common/crypt.rs b/src/daemon/auth/common/crypt.rs
index 56977be6e..9d1a37830 100644
--- a/src/daemon/auth/common/crypt.rs
+++ b/src/daemon/auth/common/crypt.rs
@@ -137,11 +137,11 @@ pub(crate) fn decrypt(key: &[u8], payload: &[u8]) -> KrillResult<Vec<u8>> {
        .map_err(|err| Error::Custom(format!("Decryption error: {}", &err)))
}

-pub(crate) fn crypt_init(config: &Config) -> KrillResult<CryptState> {
+pub(crate) async fn crypt_init(config: &Config) -> KrillResult<CryptState> {
    let store = config.key_value_store(CRYPT_STATE_NS)?;
    let key = Key::new_global(CRYPT_STATE_KEY);

-    if let Some(state) = store.get(&key)? {
+    if let Some(state) = store.get(&key).await? {
        Ok(state)
    } else {
        let mut key_bytes = [0; CHACHA20_KEY_BYTE_LEN];
@@ -149,7 +149,7 @@ pub(crate) fn crypt_init(config: &Config) -> KrillResult<CryptState> {
            .map_err(|err| Error::Custom(format!("Unable to generate symmetric key: {}", err)))?;
        let state = CryptState::from_key_bytes(key_bytes)?;

-        store.store_new(&key, &state)?;
+        store.store_new(&key, &state).await?;

        Ok(state)
    }
diff --git a/src/daemon/auth/providers/config_file/provider.rs b/src/daemon/auth/providers/config_file/provider.rs
index 36544ae87..46ccc3e67 100644
--- a/src/daemon/auth/providers/config_file/provider.rs
+++ b/src/daemon/auth/providers/config_file/provider.rs
@@ -54,7 +54,7 @@ pub struct ConfigFileAuthProvider {
}

impl ConfigFileAuthProvider {
-    pub fn new(config: Arc<Config>, session_cache: Arc<LoginSessionCache>) -> KrillResult<Self> {
+    pub async fn new(config: Arc<Config>, session_cache: Arc<LoginSessionCache>) -> KrillResult<Self> {
        match &config.auth_users {
            Some(auth_users) => {
                let mut users = HashMap::new();
@@ -62,7 +62,7 @@ impl ConfigFileAuthProvider {
                    users.insert(k.clone(), get_checked_config_user(k, v)?);
                }

-                let session_key = Self::init_session_key(&config)?;
+                let session_key = Self::init_session_key(&config).await?;

                Ok(ConfigFileAuthProvider {
                    users,
@@ -76,9 +76,9 @@ impl ConfigFileAuthProvider {
        }
    }

-    fn init_session_key(config: &Config) -> KrillResult<CryptState> {
+    async fn init_session_key(config: &Config) -> KrillResult<CryptState> {
        debug!("Initializing login session encryption key");
-        crypt::crypt_init(config)
+        crypt::crypt_init(config).await
    }

    /// Parse HTTP Basic Authorization header
diff --git a/src/daemon/auth/providers/openid_connect/provider.rs b/src/daemon/auth/providers/openid_connect/provider.rs
index 464ee375a..f25a43146 100644
--- a/src/daemon/auth/providers/openid_connect/provider.rs
+++ b/src/daemon/auth/providers/openid_connect/provider.rs
@@ -150,8 +150,8 @@ pub struct OpenIDConnectAuthProvider {
}

impl OpenIDConnectAuthProvider {
-    pub fn new(config: Arc<Config>, session_cache: Arc<LoginSessionCache>) -> KrillResult<Self> {
-        let session_key = Self::init_session_key(&config)?;
+    pub async fn new(config: Arc<Config>, session_cache: Arc<LoginSessionCache>) -> KrillResult<Self> {
+        let session_key = Self::init_session_key(&config).await?;

        Ok(OpenIDConnectAuthProvider {
            config,
@@ -728,9 +728,9 @@ impl OpenIDConnectAuthProvider {
        Ok(None)
    }

-    fn init_session_key(config: &Config) -> KrillResult<CryptState> {
+    async fn init_session_key(config: &Config) -> KrillResult<CryptState> {
        debug!("Initializing session encryption key");
-        crypt::crypt_init(config)
+        crypt::crypt_init(config).await
    }

    fn oidc_conf(&self) -> KrillResult<&ConfigAuthOpenIDConnect> {
diff --git a/src/daemon/ca/aspa.rs b/src/daemon/ca/aspa.rs
index 5fc6320a9..ff92c889a 100644
--- a/src/daemon/ca/aspa.rs
+++ b/src/daemon/ca/aspa.rs
@@ -32,7 +32,7 @@ use crate::{
    },
};

-pub fn make_aspa_object(
+pub async fn make_aspa_object(
    aspa_def: AspaDefinition,
    certified_key: &CertifiedKey,
    validity: Validity,
@@ -53,14 +53,16 @@
        let ca_issuer = incoming_cert.uri().clone();

        let mut object_builder =
-            SignedObjectBuilder::new(signer.random_serial()?, validity, crl_uri, ca_issuer, aspa_uri);
+            SignedObjectBuilder::new(signer.random_serial().await?, validity, crl_uri, ca_issuer, aspa_uri);
        object_builder.set_issuer(Some(incoming_cert.subject().clone()));
        object_builder.set_signing_time(Some(Time::now()));

        object_builder
    };

-    Ok(signer.sign_aspa(aspa_builder, object_builder, certified_key.key_id())?)
+    Ok(signer
+        .sign_aspa(aspa_builder, object_builder, certified_key.key_id())
+        .await?)
}

//------------ AspaDefinitions ---------------------------------------------

@@ -138,7 +140,7 @@
pub struct AspaObjects(HashMap);

impl AspaObjects {
-    pub fn make_aspa(
+    pub async fn make_aspa(
        &self,
        aspa_def: AspaDefinition,
        certified_key: &CertifiedKey,
@@ -150,7 +152,8 @@
            certified_key,
            issuance_timing.new_aspa_validity(),
            signer,
-        )?;
+        )
+        .await?;

        Ok(AspaInfo::new_aspa(aspa_def, aspa))
    }
@@ -160,7 +163,7 @@
    /// Note: we pass in *all* AspaDefinitions for the CA, not all
    /// definitions will be relevant for the RC (key) holding
    /// this AspaObjects.
-    pub fn update(
+    pub async fn update(
        &self,
        all_aspa_defs: &AspaDefinitions,
        certified_key: &CertifiedKey,
@@ -182,8 +185,9 @@
                .unwrap_or(true);

            if need_to_issue {
-                let aspa_info =
-                    self.make_aspa(relevant_aspa.clone(), certified_key, &config.issuance_timing, signer)?;
+                let aspa_info = self
+                    .make_aspa(relevant_aspa.clone(), certified_key, &config.issuance_timing, signer)
+                    .await?;
                object_updates.add_updated(aspa_info);
            }
        }
@@ -202,7 +206,7 @@
    // Re-new ASPAs, if the renew_threshold is specified, then
    // only objects which will expire before that time will be
    // renewed.
-    pub fn renew(
+    pub async fn renew(
        &self,
        certified_key: &CertifiedKey,
        renew_threshold: Option::create(storage_uri, namespace, false) {
            Err(e) => {
                // Note: this is highly unlikely.. probably something else is broken and Krill
@@ -490,7 +501,7 @@
                );
            }
            Ok(store) => {
-                if let Err(e) = store.update_snapshots() {
+                if let Err(e) = store.update_snapshots().await {
                    // Note: this is highly unlikely.. probably something else is broken and Krill
                    // would have panicked as a result already.
                    error!(
@@ -504,7 +515,7 @@
            }
        }
    }

-    fn update_wal_store_snapshots<W: WalSupport>(storage_uri: &Url, namespace: &Namespace) {
+    async fn update_wal_store_snapshots<W: WalSupport>(storage_uri: &Url, namespace: &Namespace) {
        match WalStore::<W>::create(storage_uri, namespace) {
            Err(e) => {
                // Note: this is highly unlikely.. probably something else is broken and Krill
@@ -515,7 +526,7 @@
                );
            }
            Ok(store) => {
-                if let Err(e) = store.update_snapshots() {
+                if let Err(e) = store.update_snapshots().await {
                    // Note: this is highly unlikely.. probably something else is broken and Krill
                    // would have panicked as a result already.
                    error!(
@@ -527,18 +538,18 @@
            }
        }

-        update_aggregate_store_snapshots::<CertAuth>(&self.config.storage_uri, CASERVER_NS);
-        update_aggregate_store_snapshots::<SignerInfo>(&self.config.storage_uri, SIGNERS_NS);
-        update_aggregate_store_snapshots::<Properties>(&self.config.storage_uri, PROPERTIES_NS);
-        update_aggregate_store_snapshots::<RepositoryAccess>(&self.config.storage_uri, PUBSERVER_NS);
+        update_aggregate_store_snapshots::<CertAuth>(&self.config.storage_uri, CASERVER_NS).await;
+        update_aggregate_store_snapshots::<SignerInfo>(&self.config.storage_uri, SIGNERS_NS).await;
+        update_aggregate_store_snapshots::<Properties>(&self.config.storage_uri, PROPERTIES_NS).await;
+        update_aggregate_store_snapshots::<RepositoryAccess>(&self.config.storage_uri, PUBSERVER_NS).await;

-        update_wal_store_snapshots::<RepositoryContent>(&self.config.storage_uri, PUBSERVER_CONTENT_NS);
+        update_wal_store_snapshots::<RepositoryContent>(&self.config.storage_uri, PUBSERVER_CONTENT_NS).await;

        Ok(TaskResult::FollowUp(Task::UpdateSnapshots, in_hours(24)))
    }

-    fn update_rrdp_if_needed(&self) -> Result<TaskResult, FatalError> {
-        match self.repo_manager.update_rrdp_if_needed() {
+    async fn update_rrdp_if_needed(&self) -> Result<TaskResult, FatalError> {
+        match self.repo_manager.update_rrdp_if_needed().await {
            Err(e) => {
                error!("Could not update RRDP deltas! Error: {}", e);
                // Should we panic in this case? For now, just keep trying, this may
@@ -572,7 +583,7 @@
        let requests = HashMap::from([(rcn, revocation_requests)]);

-        if self.ca_manager.has_ca(&ca_handle).map_err(FatalError)? {
+        if self.ca_manager.has_ca(&ca_handle).await.map_err(FatalError)? {
            let ca = self.ca_manager.get_ca(&ca_handle).await.map_err(FatalError)?;
            if ca.version() < ca_version {
                // premature, we need to wait for the CA to be committed.
@@ -602,7 +613,7 @@
        rcn: ResourceClassName,
        revocation_request: RevocationRequest,
    ) -> Result<TaskResult, FatalError> {
-        if self.ca_manager.has_ca(&ca_handle).map_err(FatalError)? {
+        if self.ca_manager.has_ca(&ca_handle).await.map_err(FatalError)? {
            info!(
                "Trigger sending revocation requests for unexpected key with id '{}' in RC '{}'",
                revocation_request.key(),
diff --git a/src/pubd/events.rs b/src/pubd/events.rs
index 5ed5139af..c85b82c9e 100644
--- a/src/pubd/events.rs
+++ b/src/pubd/events.rs
@@ -39,13 +39,14 @@ impl RepositoryAccessInitEvent {
}

impl RepositoryAccessInitEvent {
-    pub fn init(
+    pub async fn init(
        rsync_jail: uri::Rsync,
        rrdp_base_uri: uri::Https,
        signer: &KrillSigner,
    ) -> KrillResult<RepositoryAccessInitEvent> {
        signer
            .create_self_signed_id_cert()
+            .await
            .map_err(Error::signer)
            .map(|id| RepositoryAccessInitEvent {
                id_cert: id.into(),
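Since `RepositoryAccessProxy::create` and `RepositoryContentProxy::create` become async in this patch, `RepositoryManager::build` below has to be awaited as well, and the change ripples out to every caller. In rough outline, construction now looks like this (the surrounding `config`, `tasks`, `signer`, and `uris` values are assumed, not shown in the patch):

```rust
// Sketch: async construction and first-time initialization.
let repo_manager = RepositoryManager::build(config, tasks, signer).await?;
if !repo_manager.initialized().await? {
    repo_manager.init(uris).await?; // uris: PublicationServerUris
}
```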
- pub fn build(config: Arc, tasks: Arc, signer: Arc) -> Result { - let access_proxy = Arc::new(RepositoryAccessProxy::create(&config)?); - let content_proxy = Arc::new(RepositoryContentProxy::create(&config)?); + pub async fn build(config: Arc, tasks: Arc, signer: Arc) -> Result { + let access_proxy = Arc::new(RepositoryAccessProxy::create(&config).await?); + let content_proxy = Arc::new(RepositoryContentProxy::create(&config).await?); Ok(RepositoryManager { access: access_proxy, @@ -67,30 +67,30 @@ impl RepositoryManager { /// # Repository Server Management /// impl RepositoryManager { - pub fn initialized(&self) -> KrillResult { - self.access.initialized() + pub async fn initialized(&self) -> KrillResult { + self.access.initialized().await } /// Create the publication server, will fail if it was already created. - pub fn init(&self, uris: PublicationServerUris) -> KrillResult<()> { + pub async fn init(&self, uris: PublicationServerUris) -> KrillResult<()> { info!("Initializing repository"); - self.access.init(uris.clone(), self.signer.clone())?; - self.content.init(self.config.repo_dir(), uris)?; - self.content.write_repository(self.config.rrdp_updates_config)?; + self.access.init(uris.clone(), self.signer.clone()).await?; + self.content.init(self.config.repo_dir(), uris).await?; + self.content.write_repository(self.config.rrdp_updates_config).await?; Ok(()) } /// Clear the publication server. Will fail if it still /// has publishers. Or if it does not exist. - pub fn repository_clear(&self) -> KrillResult<()> { - self.access.clear()?; - self.content.clear() + pub async fn repository_clear(&self) -> KrillResult<()> { + self.access.clear().await?; + self.content.clear().await } /// List all current publishers - pub fn publishers(&self) -> KrillResult> { - self.access.publishers() + pub async fn publishers(&self) -> KrillResult> { + self.access.publishers().await } } @@ -98,12 +98,13 @@ impl RepositoryManager { /// impl RepositoryManager { /// Handle an RFC8181 request and sign the response. 
diff --git a/src/pubd/manager.rs b/src/pubd/manager.rs
index ef70d522f..4408bca2a 100644
--- a/src/pubd/manager.rs
+++ b/src/pubd/manager.rs
@@ -51,9 +51,9 @@ pub struct RepositoryManager {
 impl RepositoryManager {
     /// Builds a RepositoryManager. This will use a KeyValueStore using the
     /// the storage uri specified in the supplied `Config`.
-    pub fn build(config: Arc<Config>, tasks: Arc<TaskQueue>, signer: Arc<KrillSigner>) -> Result<Self, Error> {
-        let access_proxy = Arc::new(RepositoryAccessProxy::create(&config)?);
-        let content_proxy = Arc::new(RepositoryContentProxy::create(&config)?);
+    pub async fn build(config: Arc<Config>, tasks: Arc<TaskQueue>, signer: Arc<KrillSigner>) -> Result<Self, Error> {
+        let access_proxy = Arc::new(RepositoryAccessProxy::create(&config).await?);
+        let content_proxy = Arc::new(RepositoryContentProxy::create(&config).await?);

         Ok(RepositoryManager {
             access: access_proxy,
@@ -67,30 +67,30 @@ impl RepositoryManager {
 /// # Repository Server Management
 ///
 impl RepositoryManager {
-    pub fn initialized(&self) -> KrillResult<bool> {
-        self.access.initialized()
+    pub async fn initialized(&self) -> KrillResult<bool> {
+        self.access.initialized().await
     }

     /// Create the publication server, will fail if it was already created.
-    pub fn init(&self, uris: PublicationServerUris) -> KrillResult<()> {
+    pub async fn init(&self, uris: PublicationServerUris) -> KrillResult<()> {
         info!("Initializing repository");
-        self.access.init(uris.clone(), self.signer.clone())?;
-        self.content.init(self.config.repo_dir(), uris)?;
-        self.content.write_repository(self.config.rrdp_updates_config)?;
+        self.access.init(uris.clone(), self.signer.clone()).await?;
+        self.content.init(self.config.repo_dir(), uris).await?;
+        self.content.write_repository(self.config.rrdp_updates_config).await?;

         Ok(())
     }

     /// Clear the publication server. Will fail if it still
     /// has publishers. Or if it does not exist.
-    pub fn repository_clear(&self) -> KrillResult<()> {
-        self.access.clear()?;
-        self.content.clear()
+    pub async fn repository_clear(&self) -> KrillResult<()> {
+        self.access.clear().await?;
+        self.content.clear().await
     }

     /// List all current publishers
-    pub fn publishers(&self) -> KrillResult<Vec<PublisherHandle>> {
-        self.access.publishers()
+    pub async fn publishers(&self) -> KrillResult<Vec<PublisherHandle>> {
+        self.access.publishers().await
     }
 }

@@ -98,12 +98,13 @@ impl RepositoryManager {
 ///
 impl RepositoryManager {
     /// Handle an RFC8181 request and sign the response.
-    pub fn rfc8181(&self, publisher_handle: PublisherHandle, msg_bytes: Bytes) -> KrillResult<Bytes> {
+    pub async fn rfc8181(&self, publisher_handle: PublisherHandle, msg_bytes: Bytes) -> KrillResult<Bytes> {
         let cms_logger = CmsLogger::for_rfc8181_rcvd(self.config.rfc8181_log_dir.as_ref(), &publisher_handle);

         let cms = self
             .access
             .decode_and_validate(&publisher_handle, &msg_bytes)
+            .await
             .map_err(|e| {
                 Error::Custom(format!(
                     "Issue with publication request by publisher '{}': {}",
@@ -115,7 +116,7 @@ impl RepositoryManager {

         let is_list_query = query == publication::Query::List;

-        let response_result = self.rfc8181_message(&publisher_handle, query);
+        let response_result = self.rfc8181_message(&publisher_handle, query).await;

         let should_log_cms = response_result.is_err() || !is_list_query;

@@ -130,7 +131,7 @@ impl RepositoryManager {
             }
         };

-        let response_bytes = self.access.respond(response, &self.signer)?.to_bytes();
+        let response_bytes = self.access.respond(response, &self.signer).await?.to_bytes();

         if should_log_cms {
             cms_logger.received(&msg_bytes)?;
@@ -140,7 +141,7 @@ impl RepositoryManager {
         Ok(response_bytes)
     }

-    pub fn rfc8181_message(
+    pub async fn rfc8181_message(
         &self,
         publisher_handle: &PublisherHandle,
         query: publication::Query,
@@ -148,61 +149,62 @@ impl RepositoryManager {
         match query {
             publication::Query::List => {
                 debug!("Received RFC 8181 list query for {}", publisher_handle);
-                let list_reply = self.list(publisher_handle)?;
+                let list_reply = self.list(publisher_handle).await?;
                 Ok(publication::Message::list_reply(list_reply))
             }
             publication::Query::Delta(delta) => {
                 debug!("Received RFC 8181 delta query for {}", publisher_handle);
-                self.publish(publisher_handle, delta)?;
+                self.publish(publisher_handle, delta).await?;
                 Ok(publication::Message::success())
             }
         }
     }

     /// Do an RRDP session reset.
-    pub fn rrdp_session_reset(&self) -> KrillResult<()> {
-        self.content.session_reset(self.config.rrdp_updates_config)
+    pub async fn rrdp_session_reset(&self) -> KrillResult<()> {
+        self.content.session_reset(self.config.rrdp_updates_config).await
     }

     /// Let a known publisher publish in a repository.
-    pub fn publish(&self, publisher_handle: &PublisherHandle, delta: PublishDelta) -> KrillResult<()> {
-        let publisher = self.access.get_publisher(publisher_handle)?;
+    pub async fn publish(&self, publisher_handle: &PublisherHandle, delta: PublishDelta) -> KrillResult<()> {
+        let publisher = self.access.get_publisher(publisher_handle).await?;

         self.content
-            .publish(publisher_handle.clone(), delta, publisher.base_uri())?;
+            .publish(publisher_handle.clone(), delta, publisher.base_uri())
+            .await?;

-        self.tasks.schedule(Task::RrdpUpdateIfNeeded, now())
+        self.tasks.schedule(Task::RrdpUpdateIfNeeded, now()).await
     }

     /// Update RRDP (make new delta) if needed. If there are staged changes, but
     /// the rrdp update interval since last_update has not passed, then no update
     /// is done, but the eligible time for the next update is returned.
-    pub fn update_rrdp_if_needed(&self) -> KrillResult<Option<Time>> {
+    pub async fn update_rrdp_if_needed(&self) -> KrillResult<Option<Time>> {
         // See if an update is needed
         {
-            match self.content.rrdp_update_needed(self.config.rrdp_updates_config)? {
+            match self.content.rrdp_update_needed(self.config.rrdp_updates_config).await? {
                 RrdpUpdateNeeded::No => return Ok(None),
                 RrdpUpdateNeeded::Later(time) => return Ok(Some(time)),
                 RrdpUpdateNeeded::Yes => {} // proceed
             }
         }

-        let content = self.content.update_rrdp(self.config.rrdp_updates_config)?;
+        let content = self.content.update_rrdp(self.config.rrdp_updates_config).await?;
         content.write_repository(self.config.rrdp_updates_config)?;

         Ok(None)
     }

     /// Purge URI(s) from the server.
-    pub fn delete_matching_files(&self, criteria: RepoFileDeleteCriteria) -> KrillResult<()> {
+    pub async fn delete_matching_files(&self, criteria: RepoFileDeleteCriteria) -> KrillResult<()> {
         // update RRDP first so we apply any staged deltas.
-        self.content.update_rrdp(self.config.rrdp_updates_config)?;
+        self.content.update_rrdp(self.config.rrdp_updates_config).await?;

         // delete matching files using the updated snapshot and stage a delta if needed.
-        self.content.delete_matching_files(criteria.into())?;
+        self.content.delete_matching_files(criteria.into()).await?;

         // update RRDP again to make the delta effective immediately.
-        let content = self.content.update_rrdp(self.config.rrdp_updates_config)?;
+        let content = self.content.update_rrdp(self.config.rrdp_updates_config).await?;

         // Write the updated repository - NOTE: we no longer lock it.
         content.write_repository(self.config.rrdp_updates_config)?;
@@ -210,13 +212,13 @@ impl RepositoryManager {
         Ok(())
     }

-    pub fn repo_stats(&self) -> KrillResult<RepoStats> {
-        self.content.stats()
+    pub async fn repo_stats(&self) -> KrillResult<RepoStats> {
+        self.content.stats().await
     }

     /// Returns a list reply for a known publisher in a repository.
-    pub fn list(&self, publisher: &PublisherHandle) -> KrillResult<ListReply> {
-        self.content.list_reply(publisher)
+    pub async fn list(&self, publisher: &PublisherHandle) -> KrillResult<ListReply> {
+        self.content.list_reply(publisher).await
     }
 }

@@ -224,40 +226,43 @@ impl RepositoryManager {
 ///
 impl RepositoryManager {
     /// Returns the repository URI information for a publisher.
-    pub fn repo_info_for(&self, name: &PublisherHandle) -> KrillResult<RepoInfo> {
-        self.access.repo_info_for(name)
+    pub async fn repo_info_for(&self, name: &PublisherHandle) -> KrillResult<RepoInfo> {
+        self.access.repo_info_for(name).await
     }

-    pub fn get_publisher_details(&self, name: &PublisherHandle) -> KrillResult<PublisherDetails> {
-        let publisher = self.access.get_publisher(name)?;
+    pub async fn get_publisher_details(&self, name: &PublisherHandle) -> KrillResult<PublisherDetails> {
+        let publisher = self.access.get_publisher(name).await?;
         let id_cert = publisher.id_cert().clone();
         let base_uri = publisher.base_uri().clone();

-        let current = self.content.current_objects(name)?.try_into_publish_elements()?;
+        let current = self.content.current_objects(name).await?.try_into_publish_elements()?;

         Ok(PublisherDetails::new(name, id_cert, base_uri, current))
     }

     /// Returns the RFC8183 Repository Response for the publisher.
-    pub fn repository_response(&self, publisher: &PublisherHandle) -> KrillResult<idexchange::RepositoryResponse> {
+    pub async fn repository_response(
+        &self,
+        publisher: &PublisherHandle,
+    ) -> KrillResult<idexchange::RepositoryResponse> {
         let rfc8181_uri = self.config.rfc8181_uri(publisher);
-        self.access.repository_response(rfc8181_uri, publisher)
+        self.access.repository_response(rfc8181_uri, publisher).await
     }

     /// Adds a publisher. This will fail if a publisher already exists for the handle in the request.
-    pub fn create_publisher(&self, req: idexchange::PublisherRequest, actor: &Actor) -> KrillResult<()> {
+    pub async fn create_publisher(&self, req: idexchange::PublisherRequest, actor: &Actor) -> KrillResult<()> {
         let name = req.publisher_handle().clone();

-        self.access.add_publisher(req, actor)?;
-        self.content.add_publisher(name)
+        self.access.add_publisher(req, actor).await?;
+        self.content.add_publisher(name).await
     }

     /// Removes a publisher and all of its content.
-    pub fn remove_publisher(&self, name: PublisherHandle, actor: &Actor) -> KrillResult<()> {
-        self.content.remove_publisher(name.clone())?;
-        self.access.remove_publisher(name, actor)?;
+    pub async fn remove_publisher(&self, name: PublisherHandle, actor: &Actor) -> KrillResult<()> {
+        self.content.remove_publisher(name.clone()).await?;
+        self.access.remove_publisher(name, actor).await?;

-        self.tasks.schedule(Task::RrdpUpdateIfNeeded, now())
+        self.tasks.schedule(Task::RrdpUpdateIfNeeded, now()).await
     }
 }

@@ -265,8 +270,8 @@ impl RepositoryManager {
 ///
 impl RepositoryManager {
     /// Update the RRDP files and rsync content on disk.
-    pub fn write_repository(&self) -> KrillResult<()> {
-        self.content.write_repository(self.config.rrdp_updates_config)
+    pub async fn write_repository(&self) -> KrillResult<()> {
+        self.content.write_repository(self.config.rrdp_updates_config).await
     }
 }

@@ -310,7 +315,7 @@ mod tests {
         test::{self, https, init_config, rsync},
     };

-    fn publisher_alice(storage_uri: &Url) -> Publisher {
+    async fn publisher_alice(storage_uri: &Url) -> Publisher {
         // When the "hsm" feature is enabled we could be running the tests with PKCS#11 as the default signer type.
         // In that case, if the backend signer is SoftHSMv2, attempting to create a second instance of KrillSigner in
         // the same process will fail because it will attempt to login to SoftHSMv2 a second time which SoftHSMv2 does
@@ -325,7 +330,7 @@ mod tests {
                 .unwrap()
         };

-        let id_cert = signer.create_self_signed_id_cert().unwrap();
+        let id_cert = signer.create_self_signed_id_cert().await.unwrap();
         let base_uri = uri::Rsync::from_str("rsync://localhost/repo/alice/").unwrap();

         Publisher::new(id_cert.into(), base_uri)
@@ -336,7 +341,7 @@ mod tests {
         idexchange::PublisherRequest::new(id_cert.base64().clone(), handle, None)
     }

-    fn make_server(storage_uri: &Url, data_dir: &Path) -> RepositoryManager {
+    async fn make_server(storage_uri: &Url, data_dir: &Path) -> RepositoryManager {
         enable_test_mode();
         let mut config = Config::test(storage_uri, Some(data_dir), true, false, false, false);
         init_config(&mut config);
@@ -350,34 +355,34 @@ mod tests {
         let signer = Arc::new(signer);
         let config = Arc::new(config);
         let mq = Arc::new(TaskQueue::new(&config.storage_uri).unwrap());
-        let repository_manager = RepositoryManager::build(config, mq, signer).unwrap();
+        let repository_manager = RepositoryManager::build(config, mq, signer).await.unwrap();

         let rsync_base = rsync("rsync://localhost/repo/");
         let rrdp_base = https("https://localhost/repo/rrdp/");

         let uris = PublicationServerUris::new(rrdp_base, rsync_base);

-        repository_manager.init(uris).unwrap();
+        repository_manager.init(uris).await.unwrap();

         repository_manager
     }

-    #[test]
-    fn should_add_publisher() {
+    #[tokio::test]
+    async fn should_add_publisher() {
         // we need a disk, as repo_dir, etc. use data_dir by default
         let (data_dir, cleanup) = test::tmp_dir();
         let storage_uri = test::mem_storage();
-        let server = make_server(&storage_uri, &data_dir);
+        let server = make_server(&storage_uri, &data_dir).await;

-        let alice = publisher_alice(&storage_uri);
+        let alice = publisher_alice(&storage_uri).await;

         let alice_handle = Handle::from_str("alice").unwrap();
         let publisher_req = make_publisher_req(alice_handle.as_str(), alice.id_cert());

         let actor = Actor::actor_from_def(ACTOR_DEF_TEST);
-        server.create_publisher(publisher_req, &actor).unwrap();
+        server.create_publisher(publisher_req, &actor).await.unwrap();

-        let alice_found = server.get_publisher_details(&alice_handle).unwrap();
+        let alice_found = server.get_publisher_details(&alice_handle).await.unwrap();

         assert_eq!(alice_found.base_uri(), alice.base_uri());
         assert_eq!(alice_found.id_cert(), alice.id_cert());
@@ -386,23 +391,23 @@ mod tests {
         cleanup();
     }

-    #[test]
-    fn should_not_add_publisher_twice() {
+    #[tokio::test]
+    async fn should_not_add_publisher_twice() {
         // we need a disk, as repo_dir, etc. use data_dir by default
         let (data_dir, cleanup) = test::tmp_dir();
         let storage_uri = test::mem_storage();
-        let server = make_server(&storage_uri, &data_dir);
+        let server = make_server(&storage_uri, &data_dir).await;

-        let alice = publisher_alice(&storage_uri);
+        let alice = publisher_alice(&storage_uri).await;

         let alice_handle = Handle::from_str("alice").unwrap();
         let publisher_req = make_publisher_req(alice_handle.as_str(), alice.id_cert());

         let actor = Actor::actor_from_def(ACTOR_DEF_TEST);
-        server.create_publisher(publisher_req.clone(), &actor).unwrap();
+        server.create_publisher(publisher_req.clone(), &actor).await.unwrap();

-        match server.create_publisher(publisher_req, &actor) {
+        match server.create_publisher(publisher_req, &actor).await {
             Err(Error::PublisherDuplicate(name)) => assert_eq!(name, alice_handle),
             _ => panic!("Expected error"),
         }
@@ -410,22 +415,22 @@ mod tests {
         cleanup();
     }

-    #[test]
-    fn should_list_files() {
+    #[tokio::test]
+    async fn should_list_files() {
         // we need a disk, as repo_dir, etc. use data_dir by default
         let (data_dir, cleanup) = test::tmp_dir();
         let storage_uri = test::mem_storage();
-        let server = make_server(&storage_uri, &data_dir);
+        let server = make_server(&storage_uri, &data_dir).await;

-        let alice = publisher_alice(&storage_uri);
+        let alice = publisher_alice(&storage_uri).await;

         let alice_handle = Handle::from_str("alice").unwrap();
         let publisher_req = make_publisher_req(alice_handle.as_str(), alice.id_cert());

         let actor = Actor::actor_from_def(ACTOR_DEF_TEST);
-        server.create_publisher(publisher_req, &actor).unwrap();
+        server.create_publisher(publisher_req, &actor).await.unwrap();

-        let list_reply = server.list(&alice_handle).unwrap();
+        let list_reply = server.list(&alice_handle).await.unwrap();
         assert_eq!(0, list_reply.elements().len());

         cleanup();
@@ -436,7 +441,7 @@ mod tests {
         // we need a disk, as repo_dir, etc. use data_dir by default
         let (data_dir, cleanup) = test::tmp_dir();
         let storage_uri = test::mem_storage();
-        let server = make_server(&storage_uri, &data_dir);
+        let server = make_server(&storage_uri, &data_dir).await;

         let session = session_dir(&data_dir);

@@ -446,13 +451,13 @@ mod tests {
         assert!(session_dir_contains_serial(&session, RRDP_FIRST_SERIAL));

         // set up server with default repository, and publisher alice
-        let alice = publisher_alice(&storage_uri);
+        let alice = publisher_alice(&storage_uri).await;

         let alice_handle = Handle::from_str("alice").unwrap();
         let publisher_req = make_publisher_req(alice_handle.as_str(), alice.id_cert());

         let actor = Actor::actor_from_def(ACTOR_DEF_TEST);
-        server.create_publisher(publisher_req, &actor).unwrap();
+        server.create_publisher(publisher_req, &actor).await.unwrap();

         // get the file out of a list_reply
         fn find_in_reply<'a>(reply: &'a ListReply, uri: &uri::Rsync) -> Option<&'a ListElement> {
@@ -474,12 +479,12 @@ mod tests {
         delta.add_publish(file1.as_publish());
         delta.add_publish(file2.as_publish());

-        server.publish(&alice_handle, delta).unwrap();
-        server.update_rrdp_if_needed().unwrap();
-        server.write_repository().unwrap();
+        server.publish(&alice_handle, delta).await.unwrap();
+        server.update_rrdp_if_needed().await.unwrap();
+        server.write_repository().await.unwrap();

         // Two files should now appear in the list
-        let list_reply = server.list(&alice_handle).unwrap();
+        let list_reply = server.list(&alice_handle).await.unwrap();
         assert_eq!(2, list_reply.elements().len());
         assert!(find_in_reply(&list_reply, &test::rsync("rsync://localhost/repo/alice/file.txt")).is_some());
         assert!(find_in_reply(&list_reply, &test::rsync("rsync://localhost/repo/alice/file2.txt")).is_some());
@@ -507,12 +512,12 @@ mod tests {
         delta.add_withdraw(file2.as_withdraw());
         delta.add_publish(file3.as_publish());

-        server.publish(&alice_handle, delta).unwrap();
-        server.update_rrdp_if_needed().unwrap();
-        server.write_repository().unwrap();
+        server.publish(&alice_handle, delta).await.unwrap();
+        server.update_rrdp_if_needed().await.unwrap();
+        server.write_repository().await.unwrap();

         // Two files should now appear in the list
-        let list_reply = server.list(&alice_handle).unwrap();
+        let list_reply = server.list(&alice_handle).await.unwrap();

         assert_eq!(2, list_reply.elements().len());
         assert!(find_in_reply(&list_reply, &test::rsync("rsync://localhost/repo/alice/file.txt")).is_some());
@@ -532,7 +537,7 @@ mod tests {
         let mut delta = PublishDelta::empty();
         delta.add_publish(file_outside.as_publish());

-        match server.publish(&alice_handle, delta) {
+        match server.publish(&alice_handle, delta).await {
             Err(Error::Rfc8181Delta(PublicationDeltaError::UriOutsideJail(_, _))) => {} // ok
             _ => panic!("Expected error publishing outside of base uri jail"),
         }
@@ -545,7 +550,7 @@ mod tests {
         let mut delta = PublishDelta::empty();
         delta.add_update(file2_update.as_update(file2.hash()));

-        match server.publish(&alice_handle, delta) {
+        match server.publish(&alice_handle, delta).await {
             Err(Error::Rfc8181Delta(PublicationDeltaError::NoObjectForHashAndOrUri(_))) => {}
             _ => panic!("Expected error when file for update can't be found"),
         }
@@ -554,7 +559,7 @@ mod tests {
         let mut delta = PublishDelta::empty();
         delta.add_withdraw(file2.as_withdraw());

-        match server.publish(&alice_handle, delta) {
+        match server.publish(&alice_handle, delta).await {
             Err(Error::Rfc8181Delta(PublicationDeltaError::NoObjectForHashAndOrUri(_))) => {} // ok
             _ => panic!("Expected error withdrawing file that does not exist"),
         }
@@ -563,7 +568,7 @@ mod tests {
         let mut delta = PublishDelta::empty();
         delta.add_publish(file3.as_publish());

-        match server.publish(&alice_handle, delta) {
+        match server.publish(&alice_handle, delta).await {
             Err(Error::Rfc8181Delta(PublicationDeltaError::ObjectAlreadyPresent(uri))) => {
                 assert_eq!(uri, test::rsync("rsync://localhost/repo/alice/file3.txt"))
             }
@@ -585,9 +590,9 @@ mod tests {
         let mut delta = PublishDelta::empty();
         delta.add_publish(file4.as_publish());

-        server.publish(&alice_handle, delta).unwrap();
-        server.update_rrdp_if_needed().unwrap();
-        server.write_repository().unwrap();
+        server.publish(&alice_handle, delta).await.unwrap();
+        server.update_rrdp_if_needed().await.unwrap();
+        server.write_repository().await.unwrap();

         // Should include new snapshot and delta
         assert!(session_dir_contains_serial(&session, RRDP_FIRST_SERIAL + 3));
@@ -598,9 +603,9 @@ mod tests {
         assert!(session_dir_contains_delta(&session, RRDP_FIRST_SERIAL + 2));

         // Removing the publisher should remove its contents
-        server.remove_publisher(alice_handle, &actor).unwrap();
-        server.update_rrdp_if_needed().unwrap();
-        server.write_repository().unwrap();
+        server.remove_publisher(alice_handle, &actor).await.unwrap();
+        server.update_rrdp_if_needed().await.unwrap();
+        server.write_repository().await.unwrap();

         // new snapshot should be published, and should be empty now
         assert!(session_dir_contains_snapshot(&session, RRDP_FIRST_SERIAL + 4));
@@ -621,20 +626,20 @@ mod tests {
         cleanup();
     }

-    #[test]
-    pub fn repository_session_reset() {
+    #[tokio::test]
+    pub async fn repository_session_reset() {
         let (data_dir, cleanup) = test::tmp_dir();
         let storage_uri = test::mem_storage();
-        let server = make_server(&storage_uri, &data_dir);
+        let server = make_server(&storage_uri, &data_dir).await;

         // set up server with default repository, and publisher alice
-        let alice = publisher_alice(&storage_uri);
+        let alice = publisher_alice(&storage_uri).await;

         let alice_handle = Handle::from_str("alice").unwrap();
         let publisher_req = make_publisher_req(alice_handle.as_str(), alice.id_cert());

         let actor = Actor::actor_from_def(ACTOR_DEF_TEST);
-        server.create_publisher(publisher_req, &actor).unwrap();
+        server.create_publisher(publisher_req, &actor).await.unwrap();

         // get the file out of a list_reply
         fn find_in_reply<'a>(reply: &'a ListReply, uri: &uri::Rsync) -> Option<&'a ListElement> {
@@ -656,18 +661,18 @@ mod tests {
         delta.add_publish(file1.as_publish());
         delta.add_publish(file2.as_publish());

-        server.publish(&alice_handle, delta).unwrap();
-        server.update_rrdp_if_needed().unwrap();
-        server.write_repository().unwrap();
+        server.publish(&alice_handle, delta).await.unwrap();
+        server.update_rrdp_if_needed().await.unwrap();
+        server.write_repository().await.unwrap();

         // Two files should now appear in the list
-        let list_reply = server.list(&alice_handle).unwrap();
+        let list_reply = server.list(&alice_handle).await.unwrap();
         assert_eq!(2, list_reply.elements().len());
         assert!(find_in_reply(&list_reply, &test::rsync("rsync://localhost/repo/alice/file.txt")).is_some());
         assert!(find_in_reply(&list_reply, &test::rsync("rsync://localhost/repo/alice/file2.txt")).is_some());

         // Find RRDP files on disk
-        let stats_before = server.repo_stats().unwrap();
+        let stats_before = server.repo_stats().await.unwrap();
         let session_before = stats_before.session();
         let snapshot_before_session_reset =
             find_in_session_and_serial_dir(&data_dir, &session_before, RRDP_FIRST_SERIAL + 1, "snapshot.xml");
@@ -675,10 +680,10 @@ mod tests {
         assert!(snapshot_before_session_reset.is_some());

         // Now test that a session reset works...
-        server.rrdp_session_reset().unwrap();
+        server.rrdp_session_reset().await.unwrap();

         // Should write new session and snapshot
-        let stats_after = server.repo_stats().unwrap();
+        let stats_after = server.repo_stats().await.unwrap();
         let session_after = stats_after.session();

         let snapshot_after_session_reset =
diff --git a/src/pubd/repository.rs b/src/pubd/repository.rs
index 6198ccb92..3c9bdf00a 100644
--- a/src/pubd/repository.rs
+++ b/src/pubd/repository.rs
@@ -68,9 +68,9 @@ pub struct RepositoryContentProxy {
 }

 impl RepositoryContentProxy {
-    pub fn create(config: &Config) -> KrillResult<Self> {
+    pub async fn create(config: &Config) -> KrillResult<Self> {
         let store = Arc::new(WalStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?);
-        store.warm()?;
+        store.warm().await?;

         let default_handle = MyHandle::new("0".into());

@@ -78,8 +78,8 @@ impl RepositoryContentProxy {
     }

     /// Initialize
-    pub fn init(&self, repo_dir: &Path, uris: PublicationServerUris) -> KrillResult<()> {
-        if self.store.has(&self.default_handle)? {
+    pub async fn init(&self, repo_dir: &Path, uris: PublicationServerUris) -> KrillResult<()> {
+        if self.store.has(&self.default_handle).await? {
             Err(Error::RepositoryServerAlreadyInitialized)
         } else {
             // initialize new repo content
@@ -95,29 +95,29 @@ impl RepositoryContentProxy {
             };

             // Store newly initialized repo content on disk
-            self.store.add(&self.default_handle, repository_content)?;
+            self.store.add(&self.default_handle, repository_content).await?;

             Ok(())
         }
     }

-    fn get_default_content(&self) -> KrillResult<Arc<RepositoryContent>> {
-        self.store.get_latest(&self.default_handle)
+    async fn get_default_content(&self) -> KrillResult<Arc<RepositoryContent>> {
+        self.store.get_latest(&self.default_handle).await
     }

     // Clear all content, so it can be re-initialized.
     // Only to be called after all publishers have been removed from the RepoAccess as well.
-    pub fn clear(&self) -> KrillResult<()> {
-        let content = self.get_default_content()?;
+    pub async fn clear(&self) -> KrillResult<()> {
+        let content = self.get_default_content().await?;
         content.clear();
-        self.store.remove(&self.default_handle)?;
+        self.store.remove(&self.default_handle).await?;

         Ok(())
     }

     /// Return the repository content stats
-    pub fn stats(&self) -> KrillResult<RepoStats> {
-        self.get_default_content().map(|content| content.stats())
+    pub async fn stats(&self) -> KrillResult<RepoStats> {
+        self.get_default_content().await.map(|content| content.stats())
     }

     /// Add a publisher with an empty set of published objects.
@@ -127,16 +127,16 @@ impl RepositoryContentProxy {
     /// to the RepositoryAccess was successful (and *that* will fail if
     /// the publisher is a duplicate). This method can only fail if
     /// there is an issue with the underlying key value store.
-    pub fn add_publisher(&self, publisher: PublisherHandle) -> KrillResult<()> {
+    pub async fn add_publisher(&self, publisher: PublisherHandle) -> KrillResult<()> {
         let command = RepositoryContentCommand::add_publisher(self.default_handle.clone(), publisher);
-        self.store.send_command(command)?;
+        self.store.send_command(command).await?;

         Ok(())
     }

     /// Removes a publisher and its content.
-    pub fn remove_publisher(&self, publisher: PublisherHandle) -> KrillResult<()> {
+    pub async fn remove_publisher(&self, publisher: PublisherHandle) -> KrillResult<()> {
         let command = RepositoryContentCommand::remove_publisher(self.default_handle.clone(), publisher);
-        self.store.send_command(command)?;
+        self.store.send_command(command).await?;

         Ok(())
     }
@@ -145,45 +145,46 @@ impl RepositoryContentProxy {
     ///
     /// Assumes that the RFC 8181 CMS has been verified, but will check that all objects
     /// are within the publisher's uri space (jail).
-    pub fn publish(&self, publisher: PublisherHandle, delta: PublishDelta, jail: &uri::Rsync) -> KrillResult<()> {
+    pub async fn publish(&self, publisher: PublisherHandle, delta: PublishDelta, jail: &uri::Rsync) -> KrillResult<()> {
         debug!("Publish delta for {}", publisher);

         let delta = DeltaElements::from(delta);
         let command = RepositoryContentCommand::publish(self.default_handle.clone(), publisher, jail.clone(), delta);
-        self.store.send_command(command)?;
+        self.store.send_command(command).await?;

         Ok(())
     }

     /// Checks whether an RRDP update is needed
-    pub fn rrdp_update_needed(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<RrdpUpdateNeeded> {
+    pub async fn rrdp_update_needed(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<RrdpUpdateNeeded> {
         self.get_default_content()
+            .await
             .map(|content| content.rrdp.update_rrdp_needed(rrdp_updates_config))
     }

     /// Delete matching files from the repository and publishers
-    pub fn delete_matching_files(&self, uri: uri::Rsync) -> KrillResult<Arc<RepositoryContent>> {
+    pub async fn delete_matching_files(&self, uri: uri::Rsync) -> KrillResult<Arc<RepositoryContent>> {
         let command = RepositoryContentCommand::delete_matching_files(self.default_handle.clone(), uri);
-        self.store.send_command(command)
+        self.store.send_command(command).await
     }

     /// Update RRDP and return the RepositoryContent so it can be used for writing.
-    pub fn update_rrdp(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<Arc<RepositoryContent>> {
+    pub async fn update_rrdp(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<Arc<RepositoryContent>> {
         let command = RepositoryContentCommand::create_rrdp_delta(self.default_handle.clone(), rrdp_updates_config);
-        self.store.send_command(command)
+        self.store.send_command(command).await
     }

     /// Write all current files to disk
-    pub fn write_repository(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<()> {
-        let content = self.get_default_content()?;
+    pub async fn write_repository(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<()> {
+        let content = self.get_default_content().await?;
         content.write_repository(rrdp_updates_config)
     }

     /// Reset the RRDP session if it is initialized. Otherwise do nothing.
-    pub fn session_reset(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<()> {
-        if self.store.has(&self.default_handle)? {
+    pub async fn session_reset(&self, rrdp_updates_config: RrdpUpdatesConfig) -> KrillResult<()> {
+        if self.store.has(&self.default_handle).await? {
             let command = RepositoryContentCommand::session_reset(self.default_handle.clone());
-            let content = self.store.send_command(command)?;
+            let content = self.store.send_command(command).await?;

             content.write_repository(rrdp_updates_config)
         } else {
@@ -193,13 +194,14 @@ impl RepositoryContentProxy {
     }

     /// Create a list reply containing all current objects for a publisher
-    pub fn list_reply(&self, publisher: &PublisherHandle) -> KrillResult<ListReply> {
-        self.get_default_content()?.list_reply(publisher)
+    pub async fn list_reply(&self, publisher: &PublisherHandle) -> KrillResult<ListReply> {
+        self.get_default_content().await?.list_reply(publisher)
     }

     // Get all current objects for a publisher
-    pub fn current_objects(&self, name: &PublisherHandle) -> KrillResult<CurrentObjects> {
+    pub async fn current_objects(&self, name: &PublisherHandle) -> KrillResult<CurrentObjects> {
         self.get_default_content()
+            .await
             .map(|content| content.objects_for_publisher(name).into_owned())
     }
 }
@@ -1506,13 +1508,13 @@ pub struct RepositoryAccessProxy {
 }

 impl RepositoryAccessProxy {
-    pub fn create(config: &Config) -> KrillResult<Self> {
+    pub async fn create(config: &Config) -> KrillResult<Self> {
         let store =
             AggregateStore::<RepositoryAccess>::create(&config.storage_uri, PUBSERVER_NS, config.use_history_cache)?;
         let key = MyHandle::from_str(PUBSERVER_DFLT).unwrap();

-        if store.has(&key)? {
-            if let Err(e) = store.warm() {
+        if store.has(&key).await? {
+            if let Err(e) = store.warm().await {
                 // Start to 'warm' the cache. This serves two purposes:
                 // 1. this ensures that the `RepositoryAccess` struct is available in memory
                 // 2. this ensures that there are no apparent data issues
@@ -1534,12 +1536,12 @@ impl RepositoryAccessProxy {
         Ok(RepositoryAccessProxy { store, key })
     }

-    pub fn initialized(&self) -> KrillResult<bool> {
-        self.store.has(&self.key).map_err(Error::AggregateStoreError)
+    pub async fn initialized(&self) -> KrillResult<bool> {
+        self.store.has(&self.key).await.map_err(Error::AggregateStoreError)
     }

-    pub fn init(&self, uris: PublicationServerUris, signer: Arc<KrillSigner>) -> KrillResult<()> {
-        if self.initialized()? {
+    pub async fn init(&self, uris: PublicationServerUris, signer: Arc<KrillSigner>) -> KrillResult<()> {
+        if self.initialized().await? {
             Err(Error::RepositoryServerAlreadyInitialized)
         } else {
             let actor = Actor::system_actor();
@@ -1552,95 +1554,96 @@ impl RepositoryAccessProxy {
                 &actor,
             );

-            self.store.add(cmd)?;
+            self.store.add(cmd).await?;

             Ok(())
         }
     }

-    pub fn clear(&self) -> KrillResult<()> {
-        if !self.initialized()? {
+    pub async fn clear(&self) -> KrillResult<()> {
+        if !self.initialized().await? {
             Err(Error::RepositoryServerNotInitialized)
-        } else if !self.publishers()?.is_empty() {
+        } else if !self.publishers().await?.is_empty() {
             Err(Error::RepositoryServerHasPublishers)
         } else {
-            self.store.drop_aggregate(&self.key)?;
+            self.store.drop_aggregate(&self.key).await?;
             Ok(())
         }
     }

-    fn read(&self) -> KrillResult<Arc<RepositoryAccess>> {
-        if !self.initialized()? {
+    async fn read(&self) -> KrillResult<Arc<RepositoryAccess>> {
+        if !self.initialized().await? {
             Err(Error::RepositoryServerNotInitialized)
         } else {
             self.store
                 .get_latest(&self.key)
+                .await
                 .map_err(|e| Error::custom(format!("Publication Server data issue: {}", e)))
         }
     }

-    pub fn publishers(&self) -> KrillResult<Vec<PublisherHandle>> {
-        Ok(self.read()?.publishers())
+    pub async fn publishers(&self) -> KrillResult<Vec<PublisherHandle>> {
+        Ok(self.read().await?.publishers())
     }

-    pub fn get_publisher(&self, name: &PublisherHandle) -> KrillResult<Publisher> {
-        self.read()?.get_publisher(name).map(|p| p.clone())
+    pub async fn get_publisher(&self, name: &PublisherHandle) -> KrillResult<Publisher> {
+        self.read().await?.get_publisher(name).map(|p| p.clone())
     }

-    pub fn add_publisher(&self, req: idexchange::PublisherRequest, actor: &Actor) -> KrillResult<()> {
+    pub async fn add_publisher(&self, req: idexchange::PublisherRequest, actor: &Actor) -> KrillResult<()> {
         let name = req.publisher_handle().clone();
         let id_cert = req.validate().map_err(Error::rfc8183)?;
-        let base_uri = self.read()?.base_uri_for(&name)?;
+        let base_uri = self.read().await?.base_uri_for(&name)?;

         let cmd = RepositoryAccessCommandDetails::add_publisher(&self.key, id_cert.into(), name, base_uri, actor);

-        self.store.command(cmd)?;
+        self.store.command(cmd).await?;

         Ok(())
     }

-    pub fn remove_publisher(&self, name: PublisherHandle, actor: &Actor) -> KrillResult<()> {
-        if !self.initialized()? {
+    pub async fn remove_publisher(&self, name: PublisherHandle, actor: &Actor) -> KrillResult<()> {
+        if !self.initialized().await? {
             Err(Error::RepositoryServerNotInitialized)
         } else {
             let cmd = RepositoryAccessCommandDetails::remove_publisher(&self.key, name, actor);
-            self.store.command(cmd)?;
+            self.store.command(cmd).await?;
             Ok(())
         }
     }

     /// Returns the repository URI information for a publisher.
-    pub fn repo_info_for(&self, name: &PublisherHandle) -> KrillResult<RepoInfo> {
-        self.read()?.repo_info_for(name)
+    pub async fn repo_info_for(&self, name: &PublisherHandle) -> KrillResult<RepoInfo> {
+        self.read().await?.repo_info_for(name)
     }

     /// Returns the RFC8183 Repository Response for the publisher
-    pub fn repository_response(
+    pub async fn repository_response(
         &self,
         rfc8181_uri: uri::Https,
         publisher: &PublisherHandle,
     ) -> KrillResult<idexchange::RepositoryResponse> {
-        self.read()?.repository_response(rfc8181_uri, publisher)
+        self.read().await?.repository_response(rfc8181_uri, publisher)
     }

     /// Parse submitted bytes by a Publisher as an RFC8181 ProtocolCms object, and validates it.
-    pub fn decode_and_validate(
+    pub async fn decode_and_validate(
         &self,
         publisher: &PublisherHandle,
         bytes: &[u8],
     ) -> KrillResult<PublicationCms> {
-        let publisher = self.get_publisher(publisher)?;
+        let publisher = self.get_publisher(publisher).await?;
         let msg = PublicationCms::decode(bytes).map_err(Error::Rfc8181)?;
         msg.validate(publisher.id_cert().public_key()).map_err(Error::Rfc8181)?;
         Ok(msg)
     }

     // /// Creates and signs an RFC8181 CMS response.
-    pub fn respond(
+    pub async fn respond(
         &self,
         message: publication::Message,
         signer: &KrillSigner,
     ) -> KrillResult<PublicationCms> {
-        let key_id = self.read()?.key_id();
-        signer.create_rfc8181_cms(message, &key_id).map_err(Error::signer)
+        let key_id = self.read().await?.key_id();
+        signer.create_rfc8181_cms(message, &key_id).await.map_err(Error::signer)
     }
 }

@@ -1669,6 +1672,7 @@ impl RepositoryAccess {
 /// # Event Sourcing support
 ///
+#[async_trait::async_trait]
 impl Aggregate for RepositoryAccess {
     type Command = RepositoryAccessCommand;
     type StorableCommandDetails = StorableRepositoryCommand;
@@ -1691,11 +1695,11 @@ impl Aggregate for RepositoryAccess {
         }
     }

-    fn process_init_command(command: Self::InitCommand) -> Result<Self::InitEvent, Self::Error> {
+    async fn process_init_command(command: Self::InitCommand) -> Result<Self::InitEvent, Self::Error> {
         let details = command.into_details();
         let (rrdp_base_uri, rsync_jail, signer) = details.unpack();

-        let id_cert_info = Rfc8183Id::generate(&signer)?.into();
+        let id_cert_info = Rfc8183Id::generate(&signer).await?.into();

         Ok(RepositoryAccessInitEvent::new(id_cert_info, rrdp_base_uri, rsync_jail))
     }
@@ -1719,7 +1723,7 @@ impl Aggregate for RepositoryAccess {
         }
     }

-    fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
+    async fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
         info!(
             "Sending command to publisher '{}', version: {}: {}",
             self.handle, self.version, command
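// ---------------------------------------------------------------------------
// Aside (not part of the patch): sketch of the `#[async_trait::async_trait]`
// pattern introduced on the Aggregate impls above. Stable Rust at the time of
// this change does not allow `async fn` in traits, so the macro rewrites each
// method into one returning a boxed future, on both the trait and each impl.
// The trait below is a simplified stand-in, not Krill's real `Aggregate`.
use async_trait::async_trait;

#[async_trait]
trait Aggregate: Send + Sync {
    type Command: Send;
    type Error;

    async fn process_command(&self, command: Self::Command) -> Result<Vec<String>, Self::Error>;
}

struct Repository;

#[async_trait]
impl Aggregate for Repository {
    type Command = String;
    type Error = ();

    async fn process_command(&self, command: String) -> Result<Vec<String>, ()> {
        // The boxed future generated by the macro captures `command`, which
        // is why the associated type carries a `Send` bound.
        Ok(vec![format!("processed: {command}")])
    }
}
// ---------------------------------------------------------------------------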
diff --git a/src/ta/common.rs b/src/ta/common.rs
index 85a5aed46..94b97de63 100644
--- a/src/ta/common.rs
+++ b/src/ta/common.rs
@@ -70,7 +70,11 @@ pub struct TrustAnchorObjects {
 impl TrustAnchorObjects {
     /// Creates a new TrustAnchorObjects for the signing certificate.
-    pub fn create(signing_cert: &ReceivedCert, next_update_weeks: i64, signer: &KrillSigner) -> KrillResult<Self> {
+    pub async fn create(
+        signing_cert: &ReceivedCert,
+        next_update_weeks: i64,
+        signer: &KrillSigner,
+    ) -> KrillResult<Self> {
         let revision = ObjectSetRevision::new(1, Self::this_update(), Self::next_update(next_update_weeks));
         let key_identifier = signing_cert.key_identifier();
         let base_uri = signing_cert.ca_repository().clone();
@@ -79,11 +83,12 @@ impl TrustAnchorObjects {
         let signing_key = signing_cert.key_identifier();
         let issuer = signing_cert.subject().clone();

-        let crl = CrlBuilder::build(signing_key, issuer, &revocations, revision, signer)?;
+        let crl = CrlBuilder::build(signing_key, issuer, &revocations, revision, signer).await?;

         let manifest = ManifestBuilder::new(revision)
             .with_objects(&crl, &HashMap::new())
             .build_new_mft(signing_cert, signer)
+            .await
             .map(|m| m.into())?;

         Ok(TrustAnchorObjects {
@@ -100,7 +105,7 @@ impl TrustAnchorObjects {
     /// Publish next revision of the published objects.
     /// - Update CRL (times and revocations)
     /// - Update Manifest (times and listed objects)
-    pub fn republish(
+    pub async fn republish(
         &mut self,
         signing_cert: &ReceivedCert,
         next_update_weeks: i64,
@@ -117,11 +122,12 @@ impl TrustAnchorObjects {
         } else {
             let issuer = signing_cert.subject().clone();

-            self.crl = CrlBuilder::build(signing_key, issuer, &self.revocations, self.revision, signer)?;
+            self.crl = CrlBuilder::build(signing_key, issuer, &self.revocations, self.revision, signer).await?;

             self.manifest = ManifestBuilder::new(self.revision)
                 .with_objects(&self.crl, &self.issued_certs_objects())
                 .build_new_mft(signing_cert, signer)
+                .await
                 .map(|m| m.into())?;

             Ok(())
@@ -479,7 +485,7 @@ pub struct TrustAnchorSignerRequest {
 }

 impl TrustAnchorSignerRequest {
-    pub fn sign(
+    pub async fn sign(
         &self,
         signing_key: KeyIdentifier,
         validity_days: i64,
@@ -490,6 +496,7 @@ impl TrustAnchorSignerRequest {

         signer
             .create_ta_signed_message(data, validity_days, &signing_key)
+            .await
             .map(|msg| TrustAnchorSignedRequest {
                 request: self.clone(),
                 signed: msg.into(),
@@ -594,7 +601,7 @@ pub struct TrustAnchorSignerResponse {
 }

 impl TrustAnchorSignerResponse {
-    pub fn sign(
+    pub async fn sign(
         &self,
         validity_days: i64,
         signing_key: KeyIdentifier,
@@ -605,6 +612,7 @@ impl TrustAnchorSignerResponse {

         signer
             .create_ta_signed_message(data, validity_days, &signing_key)
+            .await
             .map(|msg| TrustAnchorSignedResponse {
                 response: self.clone(),
                 signed: msg.into(),
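// ---------------------------------------------------------------------------
// Aside (not part of the patch): standalone sketch of the manifest-builder
// change above. When the final call in a builder chain becomes async,
// `.await` slots in between that call and the trailing `map`, and `?` still
// applies to the mapped Result. `MftBuilder` is an illustrative name only.
struct MftBuilder {
    entries: Vec<String>,
}

impl MftBuilder {
    fn with_object(mut self, name: &str) -> Self {
        self.entries.push(name.to_string());
        self
    }

    async fn build(self) -> Result<Vec<String>, String> {
        Ok(self.entries)
    }
}

async fn publish() -> Result<usize, String> {
    let object_count = MftBuilder { entries: Vec::new() }
        .with_object("crl")
        .build()
        .await
        .map(|entries| entries.len())?;
    Ok(object_count)
}
// ---------------------------------------------------------------------------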
diff --git a/src/ta/mod.rs b/src/ta/mod.rs
index 9a1f60cbc..5183d8626 100644
--- a/src/ta/mod.rs
+++ b/src/ta/mod.rs
@@ -49,126 +49,129 @@ mod tests {
         test,
     };

-    #[test]
-    fn init_ta() {
-        test::test_in_memory(|storage_uri| {
-            let cleanup = test::init_logging();
-
-            let ta_signer_store: AggregateStore<TrustAnchorSigner> =
-                AggregateStore::create(storage_uri, NamespaceBuf::parse_lossy("ta_signer").as_ref(), false).unwrap();
-            let ta_proxy_store: AggregateStore<TrustAnchorProxy> =
-                AggregateStore::create(storage_uri, NamespaceBuf::parse_lossy("ta_proxy").as_ref(), false).unwrap();
-
-            // We will import a TA key - this is only (supposed to be) supported for the openssl signer
-            let signers = ConfigDefaults::openssl_signer_only();
-            let signer = Arc::new(
-                KrillSignerBuilder::new(storage_uri, Duration::from_secs(1), &signers)
-                    .build()
-                    .unwrap(),
-            );
-
-            let timing = TaTimingConfig::default();
-
-            let actor = test::test_actor();
-
-            let proxy_handle = TrustAnchorHandle::new("proxy".into());
-            let proxy_init = TrustAnchorProxyInitCommand::make(&proxy_handle, signer.clone(), &actor);
+    #[tokio::test]
+    async fn init_ta() {
+        let storage_uri = test::mem_storage();
+        let cleanup = test::init_logging();

-            ta_proxy_store.add(proxy_init).unwrap();
+        let ta_signer_store: AggregateStore<TrustAnchorSigner> =
+            AggregateStore::create(&storage_uri, NamespaceBuf::parse_lossy("ta_signer").as_ref(), false).unwrap();
+        let ta_proxy_store: AggregateStore<TrustAnchorProxy> =
+            AggregateStore::create(&storage_uri, NamespaceBuf::parse_lossy("ta_proxy").as_ref(), false).unwrap();

-            let repository = {
-                let repo_info = RepoInfo::new(
-                    test::rsync("rsync://example.krill.cloud/repo/"),
-                    Some(test::https("https://exmple.krill.cloud/repo/notification.xml")),
-                );
-                let repo_key_id = signer.create_key().unwrap();
-                let repo_key = signer.get_key_info(&repo_key_id).unwrap();
+        // We will import a TA key - this is only (supposed to be) supported for the openssl signer
+        let signers = ConfigDefaults::openssl_signer_only();
+        let signer = Arc::new(
+            KrillSignerBuilder::new(&storage_uri, Duration::from_secs(1), &signers)
+                .build()
+                .unwrap(),
+        );

-                let service_uri = ServiceUri::Https(test::https("https://example.krill.cloud/rfc8181/ta"));
-                let server_info = PublicationServerInfo::new(repo_key, service_uri);
+        let timing = TaTimingConfig::default();

-                RepositoryContact::new(repo_info, server_info)
-            };
+        let actor = test::test_actor();

-            let add_repo_cmd = TrustAnchorProxyCommand::add_repo(&proxy_handle, repository, &actor);
-            let mut proxy = ta_proxy_store.command(add_repo_cmd).unwrap();
+        let proxy_handle = TrustAnchorHandle::new("proxy".into());
+        let proxy_init = TrustAnchorProxyInitCommand::make(&proxy_handle, signer.clone(), &actor);

-            let signer_handle = TrustAnchorHandle::new("signer".into());
-            let tal_https = vec![test::https("https://example.krill.cloud/ta/ta.cer")];
-            let tal_rsync = test::rsync("rsync://example.krill.cloud/ta/ta.cer");
+        ta_proxy_store.add(proxy_init).await.unwrap();

-            let import_key_pem = include_str!("../../test-resources/ta/example-pkcs1.pem");
-
-            let signer_init_cmd = TrustAnchorSignerInitCommand::new(
-                &signer_handle,
-                TrustAnchorSignerInitCommandDetails {
-                    proxy_id: proxy.id().clone(),
-                    repo_info: proxy.repository().unwrap().repo_info().clone(),
-                    tal_https: tal_https.clone(),
-                    tal_rsync: tal_rsync.clone(),
-                    private_key_pem: Some(import_key_pem.to_string()),
-                    timing,
-                    signer: signer.clone(),
-                },
-                &actor,
+        let repository = {
+            let repo_info = RepoInfo::new(
+                test::rsync("rsync://example.krill.cloud/repo/"),
+                Some(test::https("https://exmple.krill.cloud/repo/notification.xml")),
             );
+            let repo_key_id = signer.create_key().await.unwrap();
+            let repo_key = signer.get_key_info(&repo_key_id).await.unwrap();

-            let mut ta_signer = ta_signer_store.add(signer_init_cmd).unwrap();
-            let signer_info = ta_signer.get_signer_info();
-            let add_signer_cmd = TrustAnchorProxyCommand::add_signer(&proxy_handle, signer_info, &actor);
-
-            proxy = ta_proxy_store.command(add_signer_cmd).unwrap();
+            let service_uri = ServiceUri::Https(test::https("https://example.krill.cloud/rfc8181/ta"));
+            let server_info = PublicationServerInfo::new(repo_key, service_uri);

-            // The initial signer starts off with a TA certificate
-            // and a CRL and manifest with revision number 1.
-            let ta_objects = proxy.get_trust_anchor_objects().unwrap();
-            assert_eq!(ta_objects.revision().number(), 1);
+            RepositoryContact::new(repo_info, server_info)
+        };

-            let ta_cert_details = proxy.get_ta_details().unwrap();
-            assert_eq!(ta_cert_details.tal().uris(), &tal_https);
-            assert_eq!(ta_cert_details.tal().rsync_uri(), &tal_rsync);
+        let add_repo_cmd = TrustAnchorProxyCommand::add_repo(&proxy_handle, repository, &actor);
+        let mut proxy = ta_proxy_store.command(add_repo_cmd).await.unwrap();

-            // We can make a new signer request to make a new manifest and CRL
-            // even if we do not yet have any issued certificates to publish.
-            let make_publish_request_cmd = TrustAnchorProxyCommand::make_signer_request(&proxy_handle, &actor);
-            proxy = ta_proxy_store.command(make_publish_request_cmd).unwrap();
+        let signer_handle = TrustAnchorHandle::new("signer".into());
+        let tal_https = vec![test::https("https://example.krill.cloud/ta/ta.cer")];
+        let tal_rsync = test::rsync("rsync://example.krill.cloud/ta/ta.cer");

-            let signed_request = proxy.get_signer_request(timing, &signer).unwrap();
-            let request_nonce = signed_request.content().nonce.clone();
+        let import_key_pem = include_str!("../../test-resources/ta/example-pkcs1.pem");

-            let ta_signer_process_request_command = TrustAnchorSignerCommand::make_process_request_command(
-                &signer_handle,
-                signed_request,
+        let signer_init_cmd = TrustAnchorSignerInitCommand::new(
+            &signer_handle,
+            TrustAnchorSignerInitCommandDetails {
+                proxy_id: proxy.id().clone(),
+                repo_info: proxy.repository().unwrap().repo_info().clone(),
+                tal_https: tal_https.clone(),
+                tal_rsync: tal_rsync.clone(),
+                private_key_pem: Some(import_key_pem.to_string()),
                 timing,
-                signer,
-                &actor,
-            );
-            ta_signer = ta_signer_store.command(ta_signer_process_request_command).unwrap();
-
-            let exchange = ta_signer.get_exchange(&request_nonce).unwrap();
-            let ta_proxy_process_signer_response_command =
-                TrustAnchorProxyCommand::process_signer_response(&proxy_handle, exchange.response.clone(), &actor);
-
-            proxy = ta_proxy_store
-                .command(ta_proxy_process_signer_response_command)
-                .unwrap();
-
-            // The TA should have published again, the revision used for manifest and crl will
-            // have been updated.
-            let ta_objects = proxy.get_trust_anchor_objects().unwrap();
-            assert_eq!(ta_objects.revision().number(), 2);
-
-            // We still need to test some higher order functions:
-            // - add child
-            // - let the child request a certificate
-            // - let the child perform a key rollover
-            // - let the TA publish
-            //
-            // This is hard to test at this level. So, will test this as part of the higher
-            // order functional tests found under /tests. I.e. we will start a full krill
-            // server with testbed support, which will use the TrustAnchorProxy and Signer.
-
-            cleanup();
-        })
+                signer: signer.clone(),
+            },
+            &actor,
+        );
+
+        let mut ta_signer = ta_signer_store.add(signer_init_cmd).await.unwrap();
+        let signer_info = ta_signer.get_signer_info();
+        let add_signer_cmd = TrustAnchorProxyCommand::add_signer(&proxy_handle, signer_info, &actor);
+
+        proxy = ta_proxy_store.command(add_signer_cmd).await.unwrap();
+
+        // The initial signer starts off with a TA certificate
+        // and a CRL and manifest with revision number 1.
+        let ta_objects = proxy.get_trust_anchor_objects().unwrap();
+        assert_eq!(ta_objects.revision().number(), 1);
+
+        let ta_cert_details = proxy.get_ta_details().unwrap();
+        assert_eq!(ta_cert_details.tal().uris(), &tal_https);
+        assert_eq!(ta_cert_details.tal().rsync_uri(), &tal_rsync);
+
+        // We can make a new signer request to make a new manifest and CRL
+        // even if we do not yet have any issued certificates to publish.
+        let make_publish_request_cmd = TrustAnchorProxyCommand::make_signer_request(&proxy_handle, &actor);
+        proxy = ta_proxy_store.command(make_publish_request_cmd).await.unwrap();
+
+        let signed_request = proxy.get_signer_request(timing, &signer).await.unwrap();
+        let request_nonce = signed_request.content().nonce.clone();
+
+        let ta_signer_process_request_command = TrustAnchorSignerCommand::make_process_request_command(
+            &signer_handle,
+            signed_request,
+            timing,
+            signer,
+            &actor,
+        );
+        ta_signer = ta_signer_store
+            .command(ta_signer_process_request_command)
+            .await
+            .unwrap();
+
+        let exchange = ta_signer.get_exchange(&request_nonce).unwrap();
+        let ta_proxy_process_signer_response_command =
+            TrustAnchorProxyCommand::process_signer_response(&proxy_handle, exchange.response.clone(), &actor);
+
+        proxy = ta_proxy_store
+            .command(ta_proxy_process_signer_response_command)
+            .await
+            .unwrap();
+
+        // The TA should have published again, the revision used for manifest and crl will
+        // have been updated.
+        let ta_objects = proxy.get_trust_anchor_objects().unwrap();
+        assert_eq!(ta_objects.revision().number(), 2);
+
+        // We still need to test some higher order functions:
+        // - add child
+        // - let the child request a certificate
+        // - let the child perform a key rollover
+        // - let the TA publish
+        //
+        // This is hard to test at this level. So, will test this as part of the higher
+        // order functional tests found under /tests. I.e. we will start a full krill
+        // server with testbed support, which will use the TrustAnchorProxy and Signer.
+
+        cleanup();
     }
 }
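// ---------------------------------------------------------------------------
// Aside (not part of the patch): why the `test::test_in_memory(|storage_uri|
// { ... })` wrapper was unwrapped above. A helper that takes a plain closure
// cannot `.await` inside that closure (async closures are not stable here),
// so the async test builds its in-memory storage directly. `mem_storage` is
// a hypothetical stand-in for the test helper.
fn mem_storage() -> String {
    "memory://ta-test".to_string()
}

#[tokio::test]
async fn init_ta_sketch() {
    let storage_uri = mem_storage();
    // ... the test body may now await freely ...
    assert!(storage_uri.starts_with("memory://"));
}
// ---------------------------------------------------------------------------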
diff --git a/src/ta/proxy.rs b/src/ta/proxy.rs
index d177bddbb..77ec9c9ed 100644
--- a/src/ta/proxy.rs
+++ b/src/ta/proxy.rs
@@ -375,6 +375,7 @@ impl eventsourcing::CommandDetails for TrustAnchorProxyCommandDetails {
     }
 }

+#[async_trait::async_trait]
 impl eventsourcing::Aggregate for TrustAnchorProxy {
     type Command = TrustAnchorProxyCommand;
     type StorableCommandDetails = TrustAnchorProxyCommandDetails;
@@ -396,8 +397,10 @@ impl eventsourcing::Aggregate for TrustAnchorProxy {
         }
     }

-    fn process_init_command(command: TrustAnchorProxyInitCommand) -> Result<TrustAnchorProxyInitEvent, Error> {
-        Rfc8183Id::generate(&command.into_details().signer).map(|id| TrustAnchorProxyInitEvent { id: id.into() })
+    async fn process_init_command(command: TrustAnchorProxyInitCommand) -> Result<TrustAnchorProxyInitEvent, Error> {
+        Rfc8183Id::generate(&command.into_details().signer)
+            .await
+            .map(|id| TrustAnchorProxyInitEvent { id: id.into() })
     }

     fn version(&self) -> u64 {
@@ -472,7 +475,7 @@ impl eventsourcing::Aggregate for TrustAnchorProxy {
         }
     }

-    fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
+    async fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
         if log_enabled!(log::Level::Trace) {
             trace!(
                 "Sending command to Trust Anchor Proxy '{}', version: {}: {}",
@@ -665,7 +668,7 @@ impl TrustAnchorProxy {
         self.open_signer_request.is_some()
     }

-    pub fn get_signer_request(
+    pub async fn get_signer_request(
         &self,
         timing: TaTimingConfig,
         signer: &KrillSigner,
@@ -682,11 +685,13 @@ impl TrustAnchorProxy {
                 }
             }

-            TrustAnchorSignerRequest { nonce, child_requests }.sign(
-                self.id.public_key().key_identifier(),
-                timing.signed_message_validity_days,
-                signer,
-            )
+            TrustAnchorSignerRequest { nonce, child_requests }
+                .sign(
+                    self.id.public_key().key_identifier(),
+                    timing.signed_message_validity_days,
+                    signer,
+                )
+                .await
         } else {
             Err(Error::TaProxyHasNoRequest)
         }
diff --git a/src/ta/signer.rs b/src/ta/signer.rs
index fa9f58a42..8a293e589 100644
--- a/src/ta/signer.rs
+++ b/src/ta/signer.rs
@@ -221,6 +221,7 @@ impl fmt::Display for TrustAnchorSignerStorableCommand {
     }
 }

+#[async_trait::async_trait]
 impl eventsourcing::Aggregate for TrustAnchorSigner {
     type Command = TrustAnchorSignerCommand;
     type StorableCommandDetails = TrustAnchorSignerStorableCommand;
@@ -242,13 +243,13 @@ impl eventsourcing::Aggregate for TrustAnchorSigner {
         }
     }

-    fn process_init_command(command: TrustAnchorSignerInitCommand) -> Result<TrustAnchorSignerInitEvent, Error> {
+    async fn process_init_command(command: TrustAnchorSignerInitCommand) -> Result<TrustAnchorSignerInitEvent, Error> {
         let cmd = command.into_details();

         let timing = cmd.timing;
         let signer = cmd.signer;

-        let id = Rfc8183Id::generate(&signer)?.into();
+        let id = Rfc8183Id::generate(&signer).await?.into();
         let proxy_id = cmd.proxy_id;
         let ta_cert_details = Self::create_ta_cert_details(
             cmd.repo_info,
@@ -257,8 +258,10 @@ impl eventsourcing::Aggregate for TrustAnchorSigner {
             cmd.private_key_pem,
             timing.certificate_validity_years,
             &signer,
-        )?;
-        let objects = TrustAnchorObjects::create(ta_cert_details.cert(), timing.mft_next_update_weeks, &signer)?;
+        )
+        .await?;
+
+        let objects = TrustAnchorObjects::create(ta_cert_details.cert(), timing.mft_next_update_weeks, &signer).await?;

         Ok(TrustAnchorSignerInitEvent {
             id,
@@ -294,7 +297,7 @@ impl eventsourcing::Aggregate for TrustAnchorSigner {
         }
     }

-    fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
+    async fn process_command(&self, command: Self::Command) -> Result<Vec<Self::Event>, Self::Error> {
         if log_enabled!(log::Level::Trace) {
             trace!(
                 "Sending command to Trust Anchor Signer '{}', version: {}: {}",
@@ -306,7 +309,7 @@ impl eventsourcing::Aggregate for TrustAnchorSigner {

         match command.into_details() {
             TrustAnchorSignerCommandDetails::TrustAnchorSignerRequest(request, timing, signer) => {
-                self.process_signer_request(request, timing, &signer)
+                self.process_signer_request(request, timing, &signer).await
             }
         }
     }
@@ -327,7 +330,7 @@ impl TrustAnchorSigner {
     }
 }

 impl TrustAnchorSigner {
-    fn create_ta_cert_details(
+    async fn create_ta_cert_details(
         repo_info: RepoInfo,
         tal_https: Vec<uri::Https>,
         tal_rsync: uri::Rsync,
@@ -336,16 +339,16 @@ impl TrustAnchorSigner {
         signer: &KrillSigner,
     ) -> KrillResult<TaCertDetails> {
         let key = match private_key_pem {
-            None => signer.create_key(),
-            Some(pem) => signer.import_key(&pem),
+            None => signer.create_key().await,
+            Some(pem) => signer.import_key(&pem).await,
         }?;

         let resources = ResourceSet::all();

         let cert = {
-            let serial: Serial = signer.random_serial()?;
+            let serial: Serial = signer.random_serial().await?;

-            let pub_key = signer.get_key_info(&key).map_err(Error::signer)?;
+            let pub_key = signer.get_key_info(&key).await.map_err(Error::signer)?;
             let name = pub_key.to_subject_name();

             let mut cert = TbsCert::new(
@@ -377,7 +380,7 @@ impl TrustAnchorSigner {
             cert.set_v4_resources(resources.to_ip_resources_v4());
             cert.set_v6_resources(resources.to_ip_resources_v6());

-            signer.sign_cert(cert, &key)?
+            signer.sign_cert(cert, &key).await?
         };

         let tal = TrustAnchorLocator::new(tal_https, tal_rsync.clone(), cert.subject_public_key_info());
@@ -389,7 +392,7 @@ impl TrustAnchorSigner {
     }

     /// Process a request.
-    fn process_signer_request(
+    async fn process_signer_request(
         &self,
         request: TrustAnchorSignedRequest,
         timing: TaTimingConfig,
@@ -432,7 +435,8 @@ impl TrustAnchorSigner {
                             signing_cert,
                             validity,
                             signer,
-                        )?;
+                        )
+                        .await?;

                         // Create response for certificate
                         let response = IssuanceResponse::new(
@@ -487,7 +491,9 @@ impl TrustAnchorSigner {
             child_responses.insert(child_request.child.clone(), responses);
         }

-        objects.republish(signing_cert, timing.mft_next_update_weeks, signer)?;
+        objects
+            .republish(signing_cert, timing.mft_next_update_weeks, signer)
+            .await?;

         let response = TrustAnchorSignerResponse {
             nonce: request.content().nonce.clone(),
@@ -498,7 +504,8 @@ impl TrustAnchorSigner {
             timing.signed_message_validity_days,
             self.id.public_key().key_identifier(),
             signer,
-        )?;
+        )
+        .await?;

         let exchange = TrustAnchorProxySignerExchange {
             time: Time::now(),
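// ---------------------------------------------------------------------------
// Aside (not part of the patch): standalone sketch of the dispatch change
// above - the awaited call sits directly in the match arm, so the whole
// handler stays a single async fn. The command enum is an illustrative
// stand-in, not Krill's real type.
enum Command {
    Process(String),
}

async fn process(request: String) -> Result<String, ()> {
    Ok(format!("processed {request}"))
}

async fn handle(command: Command) -> Result<String, ()> {
    match command {
        Command::Process(request) => process(request).await,
    }
}
// ---------------------------------------------------------------------------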
- verify_target_data(&config) + verify_target_data(&config).await } -fn verify_target_data(config: &Config) -> UpgradeResult<()> { - check_agg_store::(config, PROPERTIES_NS, "Properties")?; - check_agg_store::(config, SIGNERS_NS, "Signer")?; +async fn verify_target_data(config: &Config) -> UpgradeResult<()> { + check_agg_store::(config, PROPERTIES_NS, "Properties").await?; + check_agg_store::(config, SIGNERS_NS, "Signer").await?; - let ca_store = check_agg_store::(config, CASERVER_NS, "CAs")?; - check_ca_objects(config, ca_store)?; + let ca_store = check_agg_store::(config, CASERVER_NS, "CAs").await?; + check_ca_objects(config, ca_store).await?; - check_agg_store::(config, PUBSERVER_NS, "Publication Server Access")?; - check_wal_store::(config, PUBSERVER_CONTENT_NS, "Publication Server Objects")?; - check_agg_store::(config, TA_PROXY_SERVER_NS, "TA Proxy")?; - check_agg_store::(config, TA_SIGNER_SERVER_NS, "TA Signer")?; + check_agg_store::(config, PUBSERVER_NS, "Publication Server Access").await?; + check_wal_store::(config, PUBSERVER_CONTENT_NS, "Publication Server Objects").await?; + check_agg_store::(config, TA_PROXY_SERVER_NS, "TA Proxy").await?; + check_agg_store::(config, TA_SIGNER_SERVER_NS, "TA Signer").await?; - check_openssl_keys(config)?; + check_openssl_keys(config).await?; Ok(()) } -fn check_openssl_keys(config: &Config) -> UpgradeResult<()> { +async fn check_openssl_keys(config: &Config) -> UpgradeResult<()> { info!(""); info!("Verify: OpenSSL keys"); let open_ssl_signer = OpenSslSigner::build(&config.storage_uri, "test", None) .map_err(|e| UpgradeError::Custom(format!("Cannot create openssl signer: {}", e)))?; let keys_key_store = KeyValueStore::create(&config.storage_uri, KEYS_NS)?; - for key in keys_key_store.keys(&Scope::global(), "")? { + for key in keys_key_store.keys(&Scope::global(), "").await? { let key_id = KeyIdentifier::from_str(key.name().as_str()).map_err(|e| { UpgradeError::Custom(format!( "Cannot parse as key identifier: {}. Error: {}", @@ -112,7 +114,7 @@ fn check_openssl_keys(config: &Config) -> UpgradeResult<()> { e )) })?; - open_ssl_signer.get_key_info(&key_id).map_err(|e| { + open_ssl_signer.get_key_info(&key_id).await.map_err(|e| { UpgradeError::Custom(format!( "Cannot get key with key_id {} from openssl keystore. 
Error: {}", key_id, e @@ -124,12 +126,16 @@ fn check_openssl_keys(config: &Config) -> UpgradeResult<()> { Ok(()) } -fn check_agg_store(config: &Config, ns: &Namespace, name: &str) -> UpgradeResult> { +async fn check_agg_store( + config: &Config, + ns: &Namespace, + name: &str, +) -> UpgradeResult> { info!(""); info!("Verify: {name}"); let store: AggregateStore = AggregateStore::create(&config.storage_uri, ns, false)?; - if !store.list()?.is_empty() { - store.warm()?; + if !store.list().await?.is_empty() { + store.warm().await?; info!("Ok"); } else { info!("not applicable"); @@ -137,12 +143,12 @@ fn check_agg_store(config: &Config, ns: &Namespace, name: &str) -> Ok(store) } -fn check_wal_store(config: &Config, ns: &Namespace, name: &str) -> UpgradeResult<()> { +async fn check_wal_store(config: &Config, ns: &Namespace, name: &str) -> UpgradeResult<()> { info!(""); info!("Verify: {name}"); let store: WalStore = WalStore::create(&config.storage_uri, ns)?; - if !store.list()?.is_empty() { - store.warm()?; + if !store.list().await?.is_empty() { + store.warm().await?; info!("Ok"); } else { info!("not applicable"); @@ -150,7 +156,7 @@ fn check_wal_store(config: &Config, ns: &Namespace, name: &str) - Ok(()) } -fn check_ca_objects(config: &Config, ca_store: AggregateStore) -> UpgradeResult<()> { +async fn check_ca_objects(config: &Config, ca_store: AggregateStore) -> UpgradeResult<()> { // make a dummy Signer to use for the CaObjectsStore - it won't be used, // but it's needed for construction. let probe_interval = std::time::Duration::from_secs(config.signer_probe_retry_seconds); @@ -163,16 +169,16 @@ fn check_ca_objects(config: &Config, ca_store: AggregateStore) -> Upgr let ca_objects_store = CaObjectsStore::create(&config.storage_uri, config.issuance_timing.clone(), signer)?; - let cas_with_objects = ca_objects_store.cas()?; + let cas_with_objects = ca_objects_store.cas().await?; for ca in &cas_with_objects { - ca_objects_store.ca_objects(ca)?; - if !ca_store.has(ca)? { + ca_objects_store.ca_objects(ca).await?; + if !ca_store.has(ca).await? { warn!(" Objects found for CA '{}' which no longer exists.", ca); } } - for ca in ca_store.list()? { + for ca in ca_store.list().await? { if !cas_with_objects.contains(&ca) { debug!(" CA '{}' did not have any CA objects yet.", ca); } @@ -181,7 +187,7 @@ fn check_ca_objects(config: &Config, ca_store: AggregateStore) -> Upgr Ok(()) } -fn copy_data_for_migration(config: &Config, target_storage: &Url) -> UpgradeResult<()> { +async fn copy_data_for_migration(config: &Config, target_storage: &Url) -> UpgradeResult<()> { for ns in &[ "ca_objects", "cas", @@ -196,9 +202,9 @@ fn copy_data_for_migration(config: &Config, target_storage: &Url) -> UpgradeResu let namespace = Namespace::parse(ns) .map_err(|_| UpgradeError::Custom(format!("Cannot parse namespace '{}'. This is a bug.", ns)))?; let source_kv_store = KeyValueStore::create(&config.storage_uri, namespace)?; - if !source_kv_store.is_empty()? { + if !source_kv_store.is_empty().await? 
             let target_kv_store = KeyValueStore::create(target_storage, namespace)?;
-            target_kv_store.import(&source_kv_store)?;
+            target_kv_store.import(&source_kv_store).await?;
         }
     }
 
@@ -216,8 +222,8 @@ pub mod tests {
 
     use super::*;
 
-    #[test]
-    fn test_data_migration() {
+    #[tokio::test]
+    async fn test_data_migration() {
         // Create a config file that uses test data for its storage_uri
         let test_sources_base = "test-resources/migrations/v0_9_5/";
         let test_sources_url = Url::parse(&format!("local://{}", test_sources_base)).unwrap();
@@ -231,6 +237,6 @@ pub mod tests {
 
         // Create an in-memory target store to migrate to
        let target_store = test::mem_storage();
 
-        migrate(config, target_store).unwrap();
+        migrate(config, target_store).await.unwrap();
    }
 }
diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs
index 7c4c17daf..97b1c764d 100644
--- a/src/upgrades/mod.rs
+++ b/src/upgrades/mod.rs
@@ -296,11 +296,12 @@ impl UnconvertedEffect {
 //------------ UpgradeStore --------------------------------------------------
 
 /// Implement this for automatic upgrades to key stores
+#[async_trait::async_trait]
 pub trait UpgradeAggregateStorePre0_14 {
     type Aggregate: Aggregate;
 
-    type OldInitEvent: fmt::Display + Eq + PartialEq + Storable + 'static;
-    type OldEvent: fmt::Display + Eq + PartialEq + Storable + 'static;
+    type OldInitEvent: fmt::Display + Eq + PartialEq + Storable + Send + Sync + 'static;
+    type OldEvent: fmt::Display + Eq + PartialEq + Storable + Send + Sync + 'static;
     type OldStorableDetails: WithStorableDetails;
 
     //--- Mandatory functions to implement
@@ -337,7 +338,7 @@ pub trait UpgradeAggregateStorePre0_14 {
 
     /// Override this to get a call when the migration of commands for
     /// an aggregate is done.
-    fn post_command_migration(&self, handle: &MyHandle) -> UpgradeResult<()> {
+    async fn post_command_migration(&self, handle: &MyHandle) -> UpgradeResult<()> {
         trace!("default post migration hook called for '{handle}'");
         Ok(())
     }
@@ -346,10 +347,10 @@ pub trait UpgradeAggregateStorePre0_14 {
     ///
     /// Expects implementers of this trait to provide function for converting
     /// old command/event/init types to the current types.
-    fn upgrade(&self, mode: UpgradeMode) -> UpgradeResult<AspaMigrationConfigs> {
+    async fn upgrade(&self, mode: UpgradeMode) -> UpgradeResult<AspaMigrationConfigs> {
         // check existing version, wipe it if there is an unfinished upgrade
         // in progress for another Krill version.
-        self.preparation_store_prepare()?;
+        self.preparation_store_prepare().await?;
 
         info!(
             "Prepare upgrading {} to Krill version {}",
@@ -358,7 +359,7 @@ pub trait UpgradeAggregateStorePre0_14 {
         );
 
         // Migrate the event sourced data for each scope and create new snapshots
-        for scope in self.deployed_store().scopes()? {
+        for scope in self.deployed_store().scopes().await? {
             // We only need top-level scopes, not sub-scopes such as 'surplus' archive dirs
             if scope.len() != 1 {
                 trace!("Skipping migration for sub-scope: {}", scope);
@@ -372,17 +373,19 @@
 
             // Get the upgrade info to see where we got to.
             // We may be continuing from an earlier migration, e.g. by krillup.
-            let mut data_upgrade_info = self.data_upgrade_info(&scope)?;
+            let mut data_upgrade_info = self.data_upgrade_info(&scope).await?;
 
             // Get the list of commands to prepare, starting with the last_command we got to (may be 0)
-            let old_cmd_keys = self.command_keys(&scope, data_upgrade_info.last_command.unwrap_or(0))?;
+            let old_cmd_keys = self
+                .command_keys(&scope, data_upgrade_info.last_command.unwrap_or(0))
+                .await?;
 
             // Migrate the initialisation event, if not done in a previous run. This
             // is a special event that has no command, so we need to do this separately.
             if data_upgrade_info.last_command.is_none() {
                 let old_init_key = Self::event_key(scope.clone(), 0);
-                let old_init: OldStoredEvent<Self::OldInitEvent> = self.get(&old_init_key)?;
+                let old_init: OldStoredEvent<Self::OldInitEvent> = self.get(&old_init_key).await?;
                 let old_init = old_init.into_details();
 
                 // From 0.14.x and up we will have command '0' for the init, where beforehand
@@ -394,7 +397,7 @@
                 // case that there is no first command, then we might as well set
                 // it to now.
                 let time = if let Some(first_command) = old_cmd_keys.first() {
-                    let cmd: OldStoredCommand<Self::OldStorableDetails> = self.get(first_command)?;
+                    let cmd: OldStoredCommand<Self::OldStorableDetails> = self.get(first_command).await?;
                     cmd.time()
                 } else {
                     Time::now()
@@ -404,7 +407,7 @@
                 // init event we found to a StoredCommand that we can save.
                 let command = self.convert_init_event(old_init, handle.clone(), actor, time)?;
 
-                self.store_new_command(&scope, &command)?;
+                self.store_new_command(&scope, &command).await?;
 
                 data_upgrade_info.increment_command();
             }
@@ -420,7 +423,7 @@
             for old_cmd_key in old_cmd_keys {
                 // Read and parse the command.
                 trace!(" +- command: {}", old_cmd_key);
-                let old_command: OldStoredCommand<Self::OldStorableDetails> = self.get(&old_cmd_key)?;
+                let old_command: OldStoredCommand<Self::OldStorableDetails> = self.get(&old_cmd_key).await?;
 
                 // And the unconverted effects
                 let old_effect = match old_command.effect() {
@@ -430,7 +433,7 @@
                             let event_key = Self::event_key(scope.clone(), *v);
                             trace!(" +- event: {}", event_key);
                             let evt: OldStoredEvent<Self::OldEvent> =
-                                self.deployed_store().get(&event_key)?.ok_or_else(|| {
+                                self.deployed_store().get(&event_key).await?.ok_or_else(|| {
                                     UpgradeError::Custom(format!("Cannot parse old event: {}", event_key))
                                 })?;
                             full_events.push(evt.into_details());
@@ -442,7 +445,7 @@
 
                 match self.convert_old_command(old_command, old_effect, data_upgrade_info.next_command())? {
                     CommandMigrationEffect::StoredCommand(command) => {
-                        self.store_new_command(&scope, &command)?;
+                        self.store_new_command(&scope, &command).await?;
                         data_upgrade_info.increment_command();
                     }
                     CommandMigrationEffect::AspaObjectsUpdates(updates) => {
@@ -480,21 +483,25 @@
                 "Will verify the migration by rebuilding '{}' from migrated commands",
                 &scope
             );
-            let _latest = self.preparation_aggregate_store().save_snapshot(&handle).map_err(|e| {
-                UpgradeError::Custom(format!(
-                    "Could not rebuild state after migrating CA '{}'! Error was: {}.",
-                    handle, e
-                ))
-            })?;
+            let _latest = self
+                .preparation_aggregate_store()
+                .save_snapshot(&handle)
+                .await
+                .map_err(|e| {
+                    UpgradeError::Custom(format!(
+                        "Could not rebuild state after migrating CA '{}'! Error was: {}.",
+                        handle, e
+                    ))
+                })?;
 
             // Call the post command migration hook, this will do nothing
             // unless the implementer of this trait overrode it.
-            self.post_command_migration(&handle)?;
+            self.post_command_migration(&handle).await?;
 
             // Update the upgrade info as this could be a prepare only
             // run, and this migration could be resumed later after more
             // changes were applied.
-            self.update_data_upgrade_info(&scope, &data_upgrade_info)?;
+            self.update_data_upgrade_info(&scope, &data_upgrade_info).await?;
 
             info!("Verified migration of '{}'", handle);
         }
@@ -509,7 +516,7 @@
             }
             UpgradeMode::PrepareToFinalise => {
                 let mut aspa_configs = AspaMigrationConfigs::default();
-                for scope in self.deployed_store().scopes()? {
+                for scope in self.deployed_store().scopes().await? {
                     if scope.len() != 1 {
                         continue;
                     }
@@ -517,7 +524,7 @@
                     // Getting the Handle should never fail, but if it does then we should bail out asap.
                     let ca = MyHandle::from_str(&scope.to_string())
                         .map_err(|_| UpgradeError::Custom(format!("Found invalid handle '{}'", scope)))?;
-                    let info = self.data_upgrade_info(&scope)?;
+                    let info = self.data_upgrade_info(&scope).await?;
                     let aspa_configs_for_ca: Vec<AspaDefinition> = info
                         .aspa_configs
                         .into_iter()
@@ -528,7 +535,7 @@
                         aspa_configs.0.insert(ca, aspa_configs_for_ca);
                     }
                 }
-                self.clean_migration_help_files()?;
+                self.clean_migration_help_files().await?;
 
                 info!("Prepared migrating data to Krill version {}.", KRILL_VERSION);
                 Ok(aspa_configs)
@@ -541,20 +548,24 @@
 
     /// Saves the version of the target upgrade. Wipes the store if there is another
     /// version set as the target.
-    fn preparation_store_prepare(&self) -> UpgradeResult<()> {
+    async fn preparation_store_prepare(&self) -> UpgradeResult<()> {
         let code_version = KrillVersion::code_version();
         let version_key = Key::new_global(SegmentBuf::parse_lossy("version"));
 
-        if let Ok(Some(existing_migration_version)) =
-            self.preparation_key_value_store().get::<KrillVersion>(&version_key)
+        if let Ok(Some(existing_migration_version)) = self
+            .preparation_key_value_store()
+            .get::<KrillVersion>(&version_key)
+            .await
         {
             if existing_migration_version != code_version {
                 warn!("Found prepared data for Krill version {existing_migration_version}, will remove it and start from scratch for {code_version}");
-                self.preparation_key_value_store().wipe()?;
+                self.preparation_key_value_store().wipe().await?;
             }
         }
-        self.preparation_key_value_store().store(&version_key, &code_version)?;
+        self.preparation_key_value_store()
+            .store(&version_key, &code_version)
+            .await?;
         Ok(())
     }
 
@@ -583,10 +594,11 @@
         Ok(())
     }
 
-    fn store_new_command(&self, scope: &Scope, command: &StoredCommand) -> UpgradeResult<()> {
+    async fn store_new_command(&self, scope: &Scope, command: &StoredCommand) -> UpgradeResult<()> {
         let key = Self::new_stored_command_key(scope.clone(), command.version());
         self.preparation_key_value_store()
             .store_new(&key, command)
+            .await
             .map_err(UpgradeError::KeyStoreError)
     }
 
@@ -595,9 +607,10 @@
     }
 
     /// Return the DataUpgradeInfo telling us to where we got to with this migration.
-    fn data_upgrade_info(&self, scope: &Scope) -> UpgradeResult<DataUpgradeInfo> {
+    async fn data_upgrade_info(&self, scope: &Scope) -> UpgradeResult<DataUpgradeInfo> {
         self.preparation_key_value_store()
             .get(&Self::data_upgrade_info_key(scope.clone()))
+            .await
             .map(|opt| match opt {
                 None => DataUpgradeInfo::default(),
                 Some(info) => info,
@@ -606,22 +619,25 @@
     }
 
     /// Update the DataUpgradeInfo
-    fn update_data_upgrade_info(&self, scope: &Scope, info: &DataUpgradeInfo) -> UpgradeResult<()> {
+    async fn update_data_upgrade_info(&self, scope: &Scope, info: &DataUpgradeInfo) -> UpgradeResult<()> {
         self.preparation_key_value_store()
             .store(&Self::data_upgrade_info_key(scope.clone()), info)
+            .await
             .map_err(UpgradeError::KeyStoreError)
     }
 
     /// Clean up keys used for tracking migration progress
-    fn clean_migration_help_files(&self) -> UpgradeResult<()> {
+    async fn clean_migration_help_files(&self) -> UpgradeResult<()> {
         let version_key = Key::new_global(SegmentBuf::parse_lossy("version"));
         self.preparation_key_value_store()
             .drop_key(&version_key)
+            .await
             .map_err(UpgradeError::KeyStoreError)?;
 
-        for scope in self.preparation_key_value_store().scopes()? {
+        for scope in self.preparation_key_value_store().scopes().await? {
             self.preparation_key_value_store()
                 .drop_key(&Self::data_upgrade_info_key(scope))
+                .await
                 .map_err(UpgradeError::KeyStoreError)?;
         }
         Ok(())
@@ -629,8 +645,8 @@
 
     /// Find all command keys for the scope, starting from the provided sequence. Then sort them
     /// by sequence and turn them back into key store keys for further processing.
-    fn command_keys(&self, scope: &Scope, from: u64) -> Result<Vec<OldCommandKey>, UpgradeError> {
-        let keys = self.deployed_store().keys(scope, "command--")?;
+    async fn command_keys(&self, scope: &Scope, from: u64) -> Result<Vec<OldCommandKey>, UpgradeError> {
+        let keys = self.deployed_store().keys(scope, "command--").await?;
         let mut cmd_keys: Vec<OldCommandKey> = vec![];
         for key in keys {
             let cmd_key = OldCommandKey::from_str(key.name().as_str()).map_err(|_| {
@@ -649,9 +665,10 @@
         Ok(cmd_keys)
     }
 
-    fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<V, UpgradeError> {
+    async fn get<V: DeserializeOwned>(&self, key: &Key) -> Result<V, UpgradeError> {
         self.deployed_store()
-            .get(key)?
+            .get(key)
+            .await?
             .ok_or_else(|| UpgradeError::Custom(format!("Cannot read key: {}", key)))
     }
 
@@ -680,7 +697,7 @@
 /// started, it will call this again - to do the final preparation for a migration -
 /// knowing that no changes are added to the event history at this time. After this,
 /// the migration will be finalised.
-pub fn prepare_upgrade_data_migrations(
+pub async fn prepare_upgrade_data_migrations(
     mode: UpgradeMode,
     config: &Config,
     properties_manager: &PropertiesManager,
@@ -692,9 +709,9 @@
     // cheap operation that we can just do at startup. It is done here, because in effect it *is* a data
     // migration.
     #[cfg(feature = "hsm")]
-    record_preexisting_openssl_keys_in_signer_mapper(config)?;
+    record_preexisting_openssl_keys_in_signer_mapper(config).await?;
 
-    match upgrade_versions(config, properties_manager)? {
+    match upgrade_versions(config, properties_manager).await? {
         None => Ok(None),
         Some(versions) => {
             info!("Preparing upgrade from {} to {}", versions.from(), versions.to());
@@ -704,7 +721,10 @@
             // be migrated to the new setup in 0.13.0. Well.. it could be done, if there would be a strong use
             // case to put in the effort, but there really isn't.
             let ca_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?;
-            if ca_kv_store.has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("ta")))? {
+            if ca_kv_store
+                .has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("ta")))
+                .await?
+            {
                 return Err(UpgradeError::OldTaMigration);
             }
 
@@ -718,15 +738,15 @@
                 Err(UpgradeError::custom(msg))
             } else if versions.from < KrillVersion::candidate(0, 10, 0, 1) {
                 // Complex migrations involving command / event conversions
-                pre_0_10_0::PublicationServerRepositoryAccessMigration::upgrade(mode, config, &versions)?;
-                let aspa_configs = pre_0_10_0::CasMigration::upgrade(mode, config)?;
+                pre_0_10_0::PublicationServerRepositoryAccessMigration::upgrade(mode, config, &versions).await?;
+                let aspa_configs = pre_0_10_0::CasMigration::upgrade(mode, config).await?;
 
                 // The way that pubd objects were stored was changed as well (since 0.13.0)
-                migrate_pre_0_12_pubd_objects(config)?;
+                migrate_pre_0_12_pubd_objects(config).await?;
 
                 // Migrate remaining aggregate stores used in < 0.10.0 to the new format
                 // in 0.14.0 where we combine commands and events into a single key-value pair.
-                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config)?;
+                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config).await?;
 
                 Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
             } else if versions.from < KrillVersion::candidate(0, 10, 0, 3) {
@@ -741,33 +761,33 @@
             );
 
                 // The pubd objects storage changed in 0.13.0
-                migrate_pre_0_12_pubd_objects(config)?;
+                migrate_pre_0_12_pubd_objects(config).await?;
 
                 // Migrate aggregate stores used in < 0.12.0 to the new format in 0.14.0 where
                 // we combine commands and events into a single key-value pair.
-                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config)?;
-                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config)?;
+                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config).await?;
+                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config).await?;
 
                 Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
             } else if versions.from < KrillVersion::candidate(0, 13, 0, 0) {
-                migrate_0_12_pubd_objects(config)?;
+                migrate_0_12_pubd_objects(config).await?;
 
                 // Migrate aggregate stores used in < 0.13.0 to the new format in 0.14.0 where
                 // we combine commands and events into a single key-value pair.
-                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config)?;
-                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config)?;
+                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config).await?;
+                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config).await?;
 
                 Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
             } else if versions.from < KrillVersion::candidate(0, 14, 0, 0) {
                 // Migrate aggregate stores used in < 0.14.0 to the new format in 0.14.0 where
                 // we combine commands and events into a single key-value pair.
-                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreTrustAnchorSigner::upgrade(TA_SIGNER_SERVER_NS, mode, config)?;
-                pre_0_14_0::UpgradeAggregateStoreTrustAnchorProxy::upgrade(TA_PROXY_SERVER_NS, mode, config)?;
+                let aspa_configs = pre_0_14_0::CasMigration::upgrade(mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreRepositoryAccess::upgrade(PUBSERVER_NS, mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreSignerInfo::upgrade(SIGNERS_NS, mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreTrustAnchorSigner::upgrade(TA_SIGNER_SERVER_NS, mode, config).await?;
+                pre_0_14_0::UpgradeAggregateStoreTrustAnchorProxy::upgrade(TA_PROXY_SERVER_NS, mode, config).await?;
 
                 Ok(Some(UpgradeReport::new(aspa_configs, true, versions)))
             } else {
@@ -783,19 +803,19 @@
 
 /// Migrate v0.12.x RepositoryContent to the new 0.13.0+ format.
 /// Apply any open WAL changes to the source first.
-fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult<bool> {
+async fn migrate_0_12_pubd_objects(config: &Config) -> KrillResult<bool> {
     let old_store: WalStore<pre_0_13_0::OldRepositoryContent> = WalStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?;
     let repo_content_handle = MyHandle::new("0".into());
 
-    if old_store.has(&repo_content_handle)? {
-        let old_repo_content = old_store.get_latest(&repo_content_handle)?.as_ref().clone();
+    if old_store.has(&repo_content_handle).await? {
+        let old_repo_content = old_store.get_latest(&repo_content_handle).await?.as_ref().clone();
         let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?;
         let new_key = Key::new_scoped(
             Scope::from_segment(SegmentBuf::parse_lossy("0")),
             SegmentBuf::parse_lossy("snapshot.json"),
         );
         let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?;
-        upgrade_store.store(&new_key, &repo_content)?;
+        upgrade_store.store(&new_key, &repo_content).await?;
         Ok(true)
     } else {
         Ok(false)
@@ -804,10 +824,10 @@
 
 /// The format of the RepositoryContent did not change in 0.12, but
 /// the location and way of storing it did. So, migrate if present.
-fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> {
+async fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> {
     let old_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_CONTENT_NS)?;
     let old_key = Key::new_global(SegmentBuf::parse_lossy("0.json"));
-    if let Ok(Some(old_repo_content)) = old_store.get::<pre_0_13_0::OldRepositoryContent>(&old_key) {
+    if let Ok(Some(old_repo_content)) = old_store.get::<pre_0_13_0::OldRepositoryContent>(&old_key).await {
         info!("Found pre 0.12.0 RC2 publication server data. Migrating..");
         let repo_content: pubd::RepositoryContent = old_repo_content.try_into()?;
 
@@ -816,7 +836,7 @@ fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> {
             SegmentBuf::parse_lossy("snapshot.json"),
         );
         let upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_CONTENT_NS)?;
-        upgrade_store.store(&new_key, &repo_content)?;
+        upgrade_store.store(&new_key, &repo_content).await?;
     }
 
     Ok(())
@@ -827,7 +847,7 @@ fn migrate_pre_0_12_pubd_objects(config: &Config) -> KrillResult<()> {
 /// If there is any prepared data, then:
 /// - archive the current data
 /// - make the prepared data current
-pub fn finalise_data_migration(
+pub async fn finalise_data_migration(
     upgrade: &UpgradeVersions,
     config: &Config,
     properties_manager: &PropertiesManager,
@@ -859,23 +879,23 @@ pub fn finalise_data_migration(
         // Check if there is a non-empty upgrade store for this namespace
         // that would need to be migrated.
         let mut upgrade_store = KeyValueStore::create_upgrade_store(&config.storage_uri, ns)?;
-        if !upgrade_store.is_empty()? {
+        if !upgrade_store.is_empty().await? {
             info!("Migrate new data for {} and archive old", ns);
             let mut current_store = KeyValueStore::create(&config.storage_uri, ns)?;
-            if !current_store.is_empty()? {
-                current_store.migrate_to_archive(&config.storage_uri, ns)?;
+            if !current_store.is_empty().await? {
+                current_store.migrate_to_archive(&config.storage_uri, ns).await?;
             }
 
-            upgrade_store.migrate_to_current(&config.storage_uri, ns)?;
+            upgrade_store.migrate_to_current(&config.storage_uri, ns).await?;
         } else {
             // No migration needed, but check if we have a current store
             // for this namespace that still includes a version file. If
             // so, remove it.
             let current_store = KeyValueStore::create(&config.storage_uri, ns)?;
             let version_key = Key::new_global(SegmentBuf::parse_lossy("version"));
-            if current_store.has(&version_key)? {
+            if current_store.has(&version_key).await? {
                 debug!("Removing excess version key in ns: {}", ns);
-                current_store.drop_key(&version_key)?;
+                current_store.drop_key(&version_key).await?;
             }
         }
     }
@@ -883,10 +903,10 @@ pub fn finalise_data_migration(
     // Set the current version of the store to that of the running code
     let code_version = KrillVersion::code_version();
     info!("Finished upgrading Krill to version: {code_version}");
-    if properties_manager.is_initialized() {
-        properties_manager.upgrade_krill_version(code_version)?;
+    if properties_manager.is_initialized().await {
+        properties_manager.upgrade_krill_version(code_version).await?;
     } else {
-        properties_manager.init(code_version)?;
+        properties_manager.init(code_version).await?;
     }
 
     Ok(())
}
@@ -900,9 +920,9 @@
 /// signers have been registered and no key mappings have been recorded, and then walk KEYS_NS adding the keys one by
 /// one to the mapping in the signer store, if any.
 #[allow(dead_code)] // Remove when the hsm feature is removed.
-fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(), UpgradeError> {
+async fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(), UpgradeError> {
     let signers_key_store = KeyValueStore::create(&config.storage_uri, SIGNERS_NS)?;
-    if signers_key_store.is_empty()? {
+    if signers_key_store.is_empty().await? {
         let mut num_recorded_keys = 0;
 
         // If the key value store for the "signers" namespace is empty, then it was not yet initialised
         // and we may need to import keys from a previous krill installation (earlier version, or a custom
@@ -923,14 +943,14 @@ fn record_preexisting_openssl_keys_in_signer_mapper(config: &Config) -> Result<(
 
         let mut openssl_signer_handle: Option<SignerHandle> = None;
 
-        for key in keys_key_store.keys(&Scope::global(), "")? {
+        for key in keys_key_store.keys(&Scope::global(), "").await? {
             debug!("Found key: {}", key);
 
             // Is it a key identifier?
             if let Ok(key_id) = rpki::crypto::KeyIdentifier::from_str(key.name().as_str()) {
                 // Is the key already recorded in the mapper? It shouldn't be, but asking will cause the initial
                 // registration of the OpenSSL signer to occur and for it to be assigned a handle. We need the
                 // handle so that we can register keys with the mapper.
-                if krill_signer.get_key_info(&key_id).is_err() {
+                if krill_signer.get_key_info(&key_id).await.is_err() {
                     // No, record it
 
                     // Find out the handle of the OpenSSL signer used to create this key, if not yet known.
@@ -939,8 +959,8 @@
                     // them must have it and it should be the one and only OpenSSL signer that Krill was
                    // using previously. We can't just find and use the only OpenSSL signers as Krill may
                     // have been configured with more than one each with separate keys directories.
-                    for (a_signer_handle, a_signer) in krill_signer.get_active_signers().iter() {
-                        if a_signer.get_key_info(&key_id).is_ok() {
+                    for (a_signer_handle, a_signer) in krill_signer.get_active_signers().await.iter() {
+                        if a_signer.get_key_info(&key_id).await.is_ok() {
                             openssl_signer_handle = Some(a_signer_handle.clone());
                             break;
                         }
@@ -951,7 +971,7 @@
                     if let Some(signer_handle) = &openssl_signer_handle {
                         let internal_key_id = key_id.to_string();
                         if let Some(mapper) = krill_signer.get_mapper() {
-                            mapper.add_key(signer_handle, &key_id, &internal_key_id)?;
+                            mapper.add_key(signer_handle, &key_id, &internal_key_id).await?;
                             num_recorded_keys += 1;
                         }
                     }
@@ -992,14 +1012,14 @@ pub async fn post_start_upgrade(report: UpgradeReport, server: &KrillServer) ->
 /// - if the code is newer than the version used then we upgrade
 /// - if the code is the same version then we do not upgrade
 /// - if the code is older then we need to error out
-fn upgrade_versions(
+async fn upgrade_versions(
     config: &Config,
     properties_manager: &PropertiesManager,
 ) -> Result<Option<UpgradeVersions>, UpgradeError> {
-    if properties_manager.is_initialized() {
+    if properties_manager.is_initialized().await {
         // The properties manager was introduced in Krill 0.14.0.
         // If it's initialised then it MUST have a Krill Version.
-        let current = properties_manager.current_krill_version()?;
+        let current = properties_manager.current_krill_version().await?;
         UpgradeVersions::for_current(current)
     } else {
         // No KrillVersion yet. So, either this is an older Krill version,
@@ -1023,7 +1043,7 @@
         trace!("checking for version in key value store: {}", kv_store);
         let key = Key::new_global(SegmentBuf::parse_lossy("version"));
 
-        if let Some(key_store_version) = kv_store.get::<KrillVersion>(&key)? {
+        if let Some(key_store_version) = kv_store.get::<KrillVersion>(&key).await? {
             if let Some(last_seen) = &current {
                 if &key_store_version > last_seen {
                     current = Some(key_store_version)
@@ -1054,7 +1074,7 @@ mod tests {
 
     use super::*;
 
-    fn test_upgrade(base_dir: &str, namespaces: &[&str]) {
+    async fn test_upgrade(base_dir: &str, namespaces: &[&str]) {
         // Copy data for the given names spaces into memory for testing.
         let mem_storage_base_uri = test::mem_storage();
         let bogus_path = PathBuf::from("/dev/null"); // needed for tls_dir etc, but will be ignored here
@@ -1068,81 +1088,92 @@ mod tests {
             let source_store = KeyValueStore::create(&source_url, namespace.as_ref()).unwrap();
             let target_store = KeyValueStore::create(&mem_storage_base_uri, namespace.as_ref()).unwrap();
 
-            target_store.import(&source_store).unwrap();
+            target_store.import(&source_store).await.unwrap();
         }
 
         let properties_manager = PropertiesManager::create(&config.storage_uri, config.use_history_cache).unwrap();
 
         prepare_upgrade_data_migrations(UpgradeMode::PrepareOnly, &config, &properties_manager)
+            .await
             .unwrap()
             .unwrap();
 
         // and continue - immediately, but still tests that this can pick up again.
         let report = prepare_upgrade_data_migrations(UpgradeMode::PrepareToFinalise, &config, &properties_manager)
+            .await
            .unwrap()
            .unwrap();
 
-        finalise_data_migration(report.versions(), &config, &properties_manager).unwrap();
+        finalise_data_migration(report.versions(), &config, &properties_manager)
+            .await
+            .unwrap();
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_9_6() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_9_6() {
         test_upgrade(
             "test-resources/migrations/v0_9_6/",
             &["ca_objects", "cas", "pubd", "pubd_objects"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_9_5_pubserver() {
+    #[tokio::test]
-    async fn prepare_then_upgrade_0_9_5_pubserver() {
+        test_upgrade(
+            "test-resources/migrations/v0_9_5_pubserver/",
+            &["ca_objects", "cas", "pubd", "pubd_objects"],
+        )
+        .await;
+    }
 
-    #[test]
-    fn prepare_then_upgrade_0_10_3() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_10_3() {
         test_upgrade(
             "test-resources/migrations/v0_10_3/",
             &["ca_objects", "cas", "pubd", "pubd_objects"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_11_0() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_11_0() {
         test_upgrade(
             "test-resources/migrations/v0_11_0/",
             &["ca_objects", "cas", "pubd", "pubd_objects"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_12_1_pubserver() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_12_1_pubserver() {
         test_upgrade(
             "test-resources/migrations/v0_12_1_pubserver/",
             &["pubd", "pubd_objects"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_12_3() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_12_3() {
         test_upgrade(
             "test-resources/migrations/v0_12_3/",
             &["ca_objects", "cas", "pubd", "pubd_objects"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_13_1_cas() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_13_1_cas() {
         test_upgrade(
             "test-resources/migrations/v0_13_1/",
             &["ca_objects", "cas", "keys", "pubd", "pubd_objects", "signers", "status"],
-        );
+        )
+        .await;
     }
 
-    #[test]
-    fn prepare_then_upgrade_0_13_1_pubserver() {
+    #[tokio::test]
+    async fn prepare_then_upgrade_0_13_1_pubserver() {
         test_upgrade(
             "test-resources/migrations/v0_13_1_pubserver/",
             &[
@@ -1156,7 +1187,8 @@ mod tests {
                 "ta_proxy",
                 "ta_signer",
             ],
-        );
+        )
+        .await;
     }
 
     #[test]
@@ -1167,7 +1199,7 @@ mod tests {
     // #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))]
     #[allow(dead_code)] // this only looks dead because of complex features.
-    fn unmapped_keys_test_core(do_upgrade: bool) {
+    async fn unmapped_keys_test_core(do_upgrade: bool) {
         let expected_key_id =
             rpki::crypto::KeyIdentifier::from_str("5CBCAB14B810C864F3EEA8FD102B79F4E53FCC70").unwrap();
 
@@ -1178,7 +1210,7 @@ mod tests {
         let source_store = KeyValueStore::create(&source_url, KEYS_NS).unwrap();
         let target_store = KeyValueStore::create(&mem_storage_base_uri, KEYS_NS).unwrap();
 
-        target_store.import(&source_store).unwrap();
+        target_store.import(&source_store).await.unwrap();
 
         let bogus_path = PathBuf::from("/dev/null"); // needed for tls_dir etc, but will be ignored here
 
@@ -1187,7 +1219,7 @@ mod tests {
         config.process().unwrap();
 
         if do_upgrade {
-            record_preexisting_openssl_keys_in_signer_mapper(&config).unwrap();
+            record_preexisting_openssl_keys_in_signer_mapper(&config).await.unwrap();
         }
 
         // Now test that a newly initialized `KrillSigner` with a default OpenSSL signer
@@ -1202,31 +1234,31 @@ mod tests {
             .unwrap();
 
         // Trigger the signer to be bound to the one the migration just registered in the mapper
-        krill_signer.random_serial().unwrap();
+        krill_signer.random_serial().await.unwrap();
 
         // Verify that the mapper has a single registered signer
         let mapper = krill_signer.get_mapper().unwrap();
-        let signer_handles = mapper.get_signer_handles().unwrap();
+        let signer_handles = mapper.get_signer_handles().await.unwrap();
         assert_eq!(1, signer_handles.len());
 
         if do_upgrade {
             // Verify that the mapper has a record of the test key belonging to the signer
-            krill_signer.get_key_info(&expected_key_id).unwrap();
+            krill_signer.get_key_info(&expected_key_id).await.unwrap();
         } else {
             // Verify that the mapper does NOT have a record of the test key belonging to the signer
-            assert!(mapper.get_signer_for_key(&expected_key_id).is_err());
+            assert!(mapper.get_signer_for_key(&expected_key_id).await.is_err());
         }
     }
 
     #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))]
-    #[test]
-    fn test_key_not_found_error_if_unmapped_keys_are_not_mapped_on_upgrade() {
-        unmapped_keys_test_core(false);
+    #[tokio::test]
+    async fn test_key_not_found_error_if_unmapped_keys_are_not_mapped_on_upgrade() {
+        unmapped_keys_test_core(false).await;
    }
 
     #[cfg(all(feature = "hsm", not(any(feature = "hsm-tests-kmip", feature = "hsm-tests-pkcs11"))))]
-    #[test]
-    fn test_upgrading_with_unmapped_keys() {
-        unmapped_keys_test_core(true);
+    #[tokio::test]
+    async fn test_upgrading_with_unmapped_keys() {
+        unmapped_keys_test_core(true).await;
     }
 }
diff --git a/src/upgrades/pre_0_10_0/cas_migration.rs b/src/upgrades/pre_0_10_0/cas_migration.rs
index 4a244aa7d..cb5780302 100644
--- a/src/upgrades/pre_0_10_0/cas_migration.rs
+++ b/src/upgrades/pre_0_10_0/cas_migration.rs
@@ -43,12 +43,12 @@ impl CaObjectsMigration {
         })
     }
 
-    fn prepare_new_data_for(&self, ca: &CaHandle) -> Result<(), UpgradeError> {
+    async fn prepare_new_data_for(&self, ca: &CaHandle) -> Result<(), UpgradeError> {
         let key = Key::new_global(SegmentBuf::parse_lossy(&format!("{}.json", ca))); // ca should always be a valid Segment
 
-        if let Some(old_objects) = self.current_store.get::(&key)? {
+        if let Some(old_objects) = self.current_store.get::(&key).await? {
             let converted: CaObjects = old_objects.try_into()?;
 
-            self.new_store.store(&key, &converted)?;
+            self.new_store.store(&key, &converted).await?;
             debug!("Stored updated objects for CA {} in {}", ca, self.new_store);
         }
 
@@ -67,7 +67,7 @@ pub struct CasMigration {
 }
 
 impl CasMigration {
-    pub fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<AspaMigrationConfigs> {
+    pub async fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<AspaMigrationConfigs> {
         let current_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?;
         let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, CASERVER_NS)?;
 
@@ -85,9 +85,11 @@ impl CasMigration {
             ca_objects_migration,
         }
         .upgrade(mode)
+        .await
     }
 }
 
+#[async_trait::async_trait]
 impl UpgradeAggregateStorePre0_14 for CasMigration {
     type Aggregate = CertAuth;
 
@@ -198,8 +200,8 @@ impl UpgradeAggregateStorePre0_14 for CasMigration {
     }
 
     /// Override post migration, we need to do extra stuff.
-    fn post_command_migration(&self, handle: &MyHandle) -> UpgradeResult<()> {
+    async fn post_command_migration(&self, handle: &MyHandle) -> UpgradeResult<()> {
         info!("Will migrate the current repository objects for CA '{}'", handle);
-        self.ca_objects_migration.prepare_new_data_for(handle)
+        self.ca_objects_migration.prepare_new_data_for(handle).await
     }
 }
diff --git a/src/upgrades/pre_0_10_0/pubd_migration.rs b/src/upgrades/pre_0_10_0/pubd_migration.rs
index 17420e149..073f76dfc 100644
--- a/src/upgrades/pre_0_10_0/pubd_migration.rs
+++ b/src/upgrades/pre_0_10_0/pubd_migration.rs
@@ -24,7 +24,7 @@ pub struct PublicationServerRepositoryAccessMigration {
 }
 
 impl PublicationServerRepositoryAccessMigration {
-    pub fn upgrade(mode: UpgradeMode, config: &Config, versions: &UpgradeVersions) -> UpgradeResult<()> {
+    pub async fn upgrade(mode: UpgradeMode, config: &Config, versions: &UpgradeVersions) -> UpgradeResult<()> {
         let current_kv_store = KeyValueStore::create(&config.storage_uri, PUBSERVER_NS)?;
         let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, PUBSERVER_NS)?;
         let new_agg_store =
@@ -38,11 +38,12 @@ impl PublicationServerRepositoryAccessMigration {
 
         if store_migration
             .current_kv_store
-            .has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("0")))?
+            .has_scope(&Scope::from_segment(SegmentBuf::parse_lossy("0")))
+            .await?
             && versions.from >= KrillVersion::release(0, 9, 0)
             && versions.from < KrillVersion::candidate(0, 10, 0, 1)
         {
-            store_migration.upgrade(mode).map(|_| ()) // aspa configs are irrelevant here
+            store_migration.upgrade(mode).await.map(|_| ()) // aspa configs are irrelevant here
         } else {
             Ok(())
         }
diff --git a/src/upgrades/pre_0_14_0/cas_migration.rs b/src/upgrades/pre_0_14_0/cas_migration.rs
index df365389f..eb4a42fd6 100644
--- a/src/upgrades/pre_0_14_0/cas_migration.rs
+++ b/src/upgrades/pre_0_14_0/cas_migration.rs
@@ -30,7 +30,7 @@ pub struct CasMigration {
 }
 
 impl CasMigration {
-    pub fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<AspaMigrationConfigs> {
+    pub async fn upgrade(mode: UpgradeMode, config: &Config) -> UpgradeResult<AspaMigrationConfigs> {
         let current_kv_store = KeyValueStore::create(&config.storage_uri, CASERVER_NS)?;
         let new_kv_store = KeyValueStore::create_upgrade_store(&config.storage_uri, CASERVER_NS)?;
 
@@ -46,6 +46,7 @@ impl CasMigration {
             new_agg_store,
         }
         .upgrade(mode)
+        .await
     }
 }
diff --git a/src/upgrades/pre_0_14_0/mod.rs b/src/upgrades/pre_0_14_0/mod.rs
index eb4190c5c..023e842bb 100644
--- a/src/upgrades/pre_0_14_0/mod.rs
+++ b/src/upgrades/pre_0_14_0/mod.rs
@@ -297,10 +297,14 @@ pub struct GenericUpgradeAggregateStore {
 }
 
 impl GenericUpgradeAggregateStore {
-    pub fn upgrade(name_space: &Namespace, mode: UpgradeMode, config: &Config) -> UpgradeResult<AspaMigrationConfigs> {
+    pub async fn upgrade(
+        name_space: &Namespace,
+        mode: UpgradeMode,
+        config: &Config,
+    ) -> UpgradeResult<AspaMigrationConfigs> {
         let current_kv_store = KeyValueStore::create(&config.storage_uri, name_space)?;
 
-        if current_kv_store.scopes()?.is_empty() {
+        if current_kv_store.scopes().await?.is_empty() {
            // nothing to do here
             Ok(AspaMigrationConfigs::default())
         } else {
@@ -315,7 +319,7 @@ impl GenericUpgradeAggregateStore {
                 new_agg_store,
             };
 
-            store_migration.upgrade(mode)
+            store_migration.upgrade(mode).await
         }
     }
 }
@@ -473,37 +477,3 @@ pub struct Pre0_14_0AspaProvidersUpdate {
     added: Vec,
     removed: Vec,
 }
-
-// //------------ AspaObjectsUpdates ------------------------------------------
-
-// #[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
-// pub struct Pre0_14_0AspaObjectsUpdates {
-//     #[serde(skip_serializing_if = "Vec::is_empty", default)]
-//     updated: Vec,
-
-//     #[serde(skip_serializing_if = "Vec::is_empty", default)]
-//     removed: Vec,
-// }
-
-// //------------ Pre0_14_0AspaInfo -------------------------------------------
-
-// #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-// pub struct Pre0_14_0AspaInfo {
-//     // The customer ASN and all Provider ASNs
-//     definition: Pre0_14_0AspaDefinition,
-
-//     // The validity time for this ASPA.
-//     validity: Validity,
-
-//     // The serial number (needed for revocation)
-//     serial: Serial,
-
-//     // The URI where this object is expected to be published
-//     uri: uri::Rsync,
-
-//     // The actual ASPA object in base64 format.
-//     base64: Base64,
-
-//     // The ASPA object's hash
-//     hash: Hash,
-// }