From 0c0d554fc90d2609a44b4b075f4f4a257133990e Mon Sep 17 00:00:00 2001 From: Sergei Zaychenko Date: Wed, 18 Sep 2024 23:37:36 +0300 Subject: [PATCH] v0.201.0: Got rid of FlowTimeWheelService and its integration via Outbox * recording a `FlowEventScheduledForActivation` event (previously, the moment of placement into the time wheel) * maintaining a `scheduled_for_activation_at` field in the denormalized `flows` Postgres/SQLite tables * using `scheduled_for_activation_at` when querying for flows to process within the main flow executor loop * in-memory implementation replaced with mirrored lookup tables between `FlowID` and `scheduled_for_activation_at` time. * extra unit tests for new operations in the flow event store. --- CHANGELOG.md | 7 +- Cargo.lock | 260 ++++---- Cargo.toml | 132 ++-- LICENSE.txt | 4 +- .../20240917133430_flow_activation_time.sql | 7 + .../20240917133527_flow_activation_time.sql | 7 + resources/schema.gql | 6 + .../graphql/src/queries/flows/flow_event.rs | 35 + .../tests/test_gql_account_flow_configs.rs | 4 +- .../tests/tests/test_gql_dataset_flow_runs.rs | 90 +-- .../http/src/upload/upload_service_s3.rs | 7 +- src/app/cli/src/app.rs | 4 +- src/app/cli/src/services/gc_service.rs | 2 +- .../domain/src/aggregates/flow/flow.rs | 15 + .../domain/src/entities/flow/flow_event.rs | 18 + .../domain/src/entities/flow/flow_state.rs | 34 +- .../src/entities/shared/transform_rule.rs | 22 +- .../domain/src/flow_messages_types.rs | 14 +- .../domain/src/repos/flow/flow_event_store.rs | 9 + .../services/flow/flow_time_wheel_service.rs | 39 -- .../domain/src/services/flow/mod.rs | 2 - .../flow-system/services/src/dependencies.rs | 3 +- .../services/src/flow/flow_executor_impl.rs | 212 +++--- .../src/flow/flow_query_service_impl.rs | 6 +- ...ue_helper.rs => flow_scheduling_helper.rs} | 97 ++- .../src/flow/flow_time_wheel_service_impl.rs | 339 ---------- .../flow-system/services/src/flow/mod.rs | 6 +- .../src/messages/flow_message_consumers.rs | 3 - .../test_flow_configuration_service_impl.rs | 22 +- .../tests/tests/test_flow_executor_impl.rs | 626 +++++++++--------- .../tests/tests/utils/flow_harness_shared.rs | 13 +- .../tests/utils/flow_system_test_listener.rs | 2 +- .../core/src/repos/object_repository_s3.rs | 8 +- .../inmem/src/flow/inmem_flow_event_store.rs | 80 ++- .../tests/test_inmem_flow_event_store.rs | 27 + ...da4ff4638450031bf974ec57055d7755c8e81.json | 22 + ...9ed5ab8492ca105655daa6d0fd9beb02c7b1.json} | 5 +- ...42850899165f03d6bcd851884ac973a76587c.json | 20 + src/infra/flow-system/postgres/Cargo.toml | 1 + .../postgres/src/postgres_flow_event_store.rs | 71 +- .../tests/test_postgres_flow_event_store.rs | 27 + .../repo-tests/src/test_flow_event_store.rs | 398 ++++++++++- ...c74f18c4e37e81cb05d69181caaff1d6f45fc.json | 20 + ...0508f8fdc9f47e03ea87dd34214513b392a3c.json | 20 + ...92e604e9881d2c02cce177a7e03945c334018.json | 20 - ...9970c70bd270c7fc4a444039ef7699c7dd95e.json | 20 + src/infra/flow-system/sqlite/Cargo.toml | 1 + .../sqlite/src/sqlite_flow_event_store.rs | 75 ++- .../tests/test_sqlite_flow_event_store.rs | 27 + .../tests/tests/test_time_source.rs | 46 +- 50 files changed, 1677 insertions(+), 1258 deletions(-) create mode 100644 migrations/postgres/20240917133430_flow_activation_time.sql create mode 100644 migrations/sqlite/20240917133527_flow_activation_time.sql delete mode 100644 src/domain/flow-system/domain/src/services/flow/flow_time_wheel_service.rs rename src/domain/flow-system/services/src/flow/{flow_enqueue_helper.rs => flow_scheduling_helper.rs} (92%) delete mode 100644 
src/domain/flow-system/services/src/flow/flow_time_wheel_service_impl.rs create mode 100644 src/infra/flow-system/postgres/.sqlx/query-03a5ca688456ac1c619ed1fba97da4ff4638450031bf974ec57055d7755c8e81.json rename src/infra/flow-system/postgres/.sqlx/{query-eb2fd83f0a62ed5546e4af19b9a6f9fe8019e7bc1b5dc7ef650af0ef9886aeb5.json => query-66f90f578e5c0d0e4b40fc058ffd9ed5ab8492ca105655daa6d0fd9beb02c7b1.json} (57%) create mode 100644 src/infra/flow-system/postgres/.sqlx/query-ed899492a3b7dc735cd0dda739a42850899165f03d6bcd851884ac973a76587c.json create mode 100644 src/infra/flow-system/sqlite/.sqlx/query-379da668e4d617c8bd2f384a45ac74f18c4e37e81cb05d69181caaff1d6f45fc.json create mode 100644 src/infra/flow-system/sqlite/.sqlx/query-7686af1119ee85b9019d157497b0508f8fdc9f47e03ea87dd34214513b392a3c.json delete mode 100644 src/infra/flow-system/sqlite/.sqlx/query-c7b1895e06e6920f7868b251c0e92e604e9881d2c02cce177a7e03945c334018.json create mode 100644 src/infra/flow-system/sqlite/.sqlx/query-deeb6fe445c44ad051ad615751e9970c70bd270c7fc4a444039ef7699c7dd95e.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b817544e1..ade65606ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 -## [Unreleased] +## [0.201.0] - 2024-09-18 ### Added - REST API: New `/verify` endpoint allows verification of query commitment as per [documentation](https://docs.kamu.dev/node/commitments/#dispute-resolution) (#831) ### Changed @@ -19,6 +19,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - consumption jobs invoke consumers and detect their failures - Detecting concurrent modifications in flow and task event stores - Improved and cleaned handling of flow abortions at different stages of processing +- Revised implementation of flow scheduling to avoid in-memory time wheel: + - recording a `FlowEventScheduledForActivation` event (previously, the moment of placement into the time wheel) + - replaced binary-heap-based time wheel operations with event store queries + - Postgres/SQLite event stores additionally track activation time for waiting flows + - in-memory event store keeps prepared map-based lookup structures for activation time ## [0.200.0] - 2024-09-13 ### Added diff --git a/Cargo.lock b/Cargo.lock index 18b9a3563d..e2ce7287fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b4f201b0ac8f81315fbdc55269965a8ddadbc04ab47fa65a1a468f9a40f7a5f" +checksum = "b68b94c159bcc2ca5f758b8663d7b00fc7c5e40569984595ddf2221b0f7f7f6e" dependencies = [ "num_enum", "strum 0.26.3", ] @@ -576,7 +576,7 @@ dependencies = [ "alloy-transport", "futures", "http 1.1.0", - "rustls 0.23.12", + "rustls 0.23.13", "serde_json", "tokio", "tokio-tungstenite 0.23.1", @@ -656,9 +656,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "approx" @@ -807,9 +807,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -1997,9 +1997,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -2103,9 +2103,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -2237,9 +2237,9 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.5.26" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "205d5ef6d485fa47606b98b0ddc4ead26eb850aaa86abfb562a94fb3280ecba0" +checksum = "9b378c786d3bde9442d2c6dd7e6080b2a818db2b96e30d6e7f1b6d224eb617d3" dependencies = [ "clap", ] @@ -2375,7 +2375,7 @@ checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "container-runtime" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "cfg-if", @@ -2692,9 +2692,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.74+curl-8.9.0" +version = "0.4.75+curl-8.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8af10b986114528fcdc4b63b6f5f021b7057618411046a4de2ba0f0149a097bf" +checksum = "2a4fd752d337342e4314717c0d9b6586b059a120c80029ebe4d49b11fec7875e" dependencies = [ "cc", "libc", @@ -2814,7 +2814,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "database-common" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "aws-config", @@ -2838,7 +2838,7 @@ dependencies = [ [[package]] name = "database-common-macros" -version = "0.200.0" +version = "0.201.0" dependencies = [ "quote", "syn 2.0.77", @@ -3581,11 +3581,11 @@ checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.77", @@ -3593,7 +3593,7 @@ dependencies = [ [[package]] name = "enum-variants" -version = "0.200.0" +version = "0.201.0" [[package]] name = "env_filter" @@ -3666,7 +3666,7 @@ dependencies = [ [[package]] name = "event-sourcing" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -3682,7 +3682,7 @@ dependencies = [ [[package]] name = "event-sourcing-macros" -version = "0.200.0" +version = "0.201.0" dependencies = [ "quote", "syn 2.0.77", @@ -4346,7 +4346,7 @@ dependencies = [ [[package]] name = "http-common" -version = "0.200.0" +version = "0.201.0" dependencies = [ "axum", "http 0.2.12", @@ -4462,7 +4462,7 @@ dependencies = [ "http 1.1.0", 
"hyper 1.4.1", "hyper-util", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -4484,9 +4484,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -4504,9 +4504,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -4670,7 +4670,7 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "internal-error" -version = "0.200.0" +version = "0.201.0" dependencies = [ "thiserror", ] @@ -4827,7 +4827,7 @@ dependencies = [ [[package]] name = "kamu" -version = "0.200.0" +version = "0.201.0" dependencies = [ "alloy", "async-recursion", @@ -4914,7 +4914,7 @@ dependencies = [ [[package]] name = "kamu-accounts" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "base32", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "kamu-accounts-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -4961,7 +4961,7 @@ dependencies = [ [[package]] name = "kamu-accounts-mysql" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -4982,7 +4982,7 @@ dependencies = [ [[package]] name = "kamu-accounts-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5003,7 +5003,7 @@ dependencies = [ [[package]] name = "kamu-accounts-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "argon2", "chrono", @@ -5019,7 +5019,7 @@ dependencies = [ [[package]] name = "kamu-accounts-services" -version = "0.200.0" +version = "0.201.0" dependencies = [ "argon2", "async-trait", @@ -5045,7 +5045,7 @@ dependencies = [ [[package]] name = "kamu-accounts-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5066,7 +5066,7 @@ dependencies = [ [[package]] name = "kamu-adapter-auth-oso" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "dill", @@ -5088,7 +5088,7 @@ dependencies = [ [[package]] name = "kamu-adapter-flight-sql" -version = "0.200.0" +version = "0.201.0" dependencies = [ "arrow-flight", "async-trait", @@ -5111,7 +5111,7 @@ dependencies = [ [[package]] name = "kamu-adapter-graphql" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-graphql", "async-trait", @@ -5161,7 +5161,7 @@ dependencies = [ [[package]] name = "kamu-adapter-http" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "aws-sdk-s3", @@ -5225,7 +5225,7 @@ dependencies = [ [[package]] name = "kamu-adapter-oauth" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5244,7 +5244,7 @@ dependencies = [ [[package]] name = "kamu-adapter-odata" -version = "0.200.0" +version = "0.201.0" dependencies = [ "axum", "chrono", @@ -5279,7 +5279,7 @@ dependencies = [ [[package]] name = "kamu-auth-rebac" -version = "0.200.0" +version = "0.201.0" dependencies = [ 
"async-trait", "internal-error", @@ -5291,7 +5291,7 @@ dependencies = [ [[package]] name = "kamu-auth-rebac-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "database-common-macros", @@ -5305,7 +5305,7 @@ dependencies = [ [[package]] name = "kamu-auth-rebac-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "dill", "kamu-auth-rebac", @@ -5314,7 +5314,7 @@ dependencies = [ [[package]] name = "kamu-auth-rebac-services" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "dill", @@ -5333,7 +5333,7 @@ dependencies = [ [[package]] name = "kamu-auth-rebac-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "database-common", @@ -5350,7 +5350,7 @@ dependencies = [ [[package]] name = "kamu-cli" -version = "0.200.0" +version = "0.201.0" dependencies = [ "arrow-flight", "async-graphql", @@ -5468,7 +5468,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-common" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "indoc 2.0.5", @@ -5488,7 +5488,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-common-macros" -version = "0.200.0" +version = "0.201.0" dependencies = [ "quote", "syn 2.0.77", @@ -5496,7 +5496,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "indoc 2.0.5", "kamu-cli-e2e-common", @@ -5509,7 +5509,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-mysql" -version = "0.200.0" +version = "0.201.0" dependencies = [ "indoc 2.0.5", "kamu-cli-e2e-common", @@ -5523,7 +5523,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "indoc 2.0.5", "kamu-cli-e2e-common", @@ -5537,7 +5537,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "indoc 2.0.5", @@ -5553,7 +5553,7 @@ dependencies = [ [[package]] name = "kamu-cli-e2e-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "indoc 2.0.5", "kamu-cli-e2e-common", @@ -5567,7 +5567,7 @@ dependencies = [ [[package]] name = "kamu-cli-puppet" -version = "0.200.0" +version = "0.201.0" dependencies = [ "assert_cmd", "async-trait", @@ -5583,7 +5583,7 @@ dependencies = [ [[package]] name = "kamu-core" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -5613,7 +5613,7 @@ dependencies = [ [[package]] name = "kamu-data-utils" -version = "0.200.0" +version = "0.201.0" dependencies = [ "arrow", "arrow-digest", @@ -5638,7 +5638,7 @@ dependencies = [ [[package]] name = "kamu-datafusion-cli" -version = "0.200.0" +version = "0.201.0" dependencies = [ "arrow", "async-trait", @@ -5660,7 +5660,7 @@ dependencies = [ [[package]] name = "kamu-datasets" -version = "0.200.0" +version = "0.201.0" dependencies = [ "aes-gcm", "async-trait", @@ -5679,7 +5679,7 @@ dependencies = [ [[package]] name = "kamu-datasets-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5702,7 +5702,7 @@ dependencies = [ [[package]] name = "kamu-datasets-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5724,7 +5724,7 @@ dependencies = [ [[package]] name = "kamu-datasets-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "database-common", @@ -5738,7 +5738,7 @@ dependencies = [ [[package]] name = "kamu-datasets-services" -version = "0.200.0" +version = "0.201.0" dependencies = [ 
"async-trait", "chrono", @@ -5759,7 +5759,7 @@ dependencies = [ [[package]] name = "kamu-datasets-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5782,7 +5782,7 @@ dependencies = [ [[package]] name = "kamu-flow-system" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5811,7 +5811,7 @@ dependencies = [ [[package]] name = "kamu-flow-system-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -5841,7 +5841,7 @@ dependencies = [ [[package]] name = "kamu-flow-system-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -5861,11 +5861,12 @@ dependencies = [ "test-log", "tokio", "tokio-stream", + "tracing", ] [[package]] name = "kamu-flow-system-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "database-common", @@ -5878,7 +5879,7 @@ dependencies = [ [[package]] name = "kamu-flow-system-services" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -5920,7 +5921,7 @@ dependencies = [ [[package]] name = "kamu-flow-system-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -5940,11 +5941,12 @@ dependencies = [ "test-log", "tokio", "tokio-stream", + "tracing", ] [[package]] name = "kamu-ingest-datafusion" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -5981,7 +5983,7 @@ dependencies = [ [[package]] name = "kamu-messaging-outbox-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -6000,7 +6002,7 @@ dependencies = [ [[package]] name = "kamu-messaging-outbox-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -6023,7 +6025,7 @@ dependencies = [ [[package]] name = "kamu-messaging-outbox-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "database-common", @@ -6037,7 +6039,7 @@ dependencies = [ [[package]] name = "kamu-messaging-outbox-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -6059,7 +6061,7 @@ dependencies = [ [[package]] name = "kamu-repo-tools" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "clap", @@ -6074,7 +6076,7 @@ dependencies = [ [[package]] name = "kamu-task-system" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -6092,7 +6094,7 @@ dependencies = [ [[package]] name = "kamu-task-system-inmem" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -6111,7 +6113,7 @@ dependencies = [ [[package]] name = "kamu-task-system-postgres" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -6134,7 +6136,7 @@ dependencies = [ [[package]] name = "kamu-task-system-repo-tests" -version = "0.200.0" +version = "0.201.0" dependencies = [ "chrono", "database-common", @@ -6146,7 +6148,7 @@ dependencies = [ [[package]] name = "kamu-task-system-services" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -6173,7 +6175,7 @@ dependencies = [ [[package]] name = "kamu-task-system-sqlite" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-stream", "async-trait", @@ -6205,9 +6207,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -6532,9 +6534,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] @@ -6563,7 +6565,7 @@ dependencies = [ [[package]] name = "messaging-outbox" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -6705,7 +6707,7 @@ dependencies = [ [[package]] name = "multiformats" -version = "0.200.0" +version = "0.201.0" dependencies = [ "base64 0.22.1", "bs58", @@ -7010,7 +7012,7 @@ dependencies = [ [[package]] name = "observability" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "axum", @@ -7055,7 +7057,7 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "opendatafabric" -version = "0.200.0" +version = "0.201.0" dependencies = [ "arrow", "base64 0.22.1", @@ -7732,9 +7734,9 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -7781,7 +7783,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -7910,7 +7912,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "socket2", "thiserror", "tokio", @@ -7927,7 +7929,7 @@ dependencies = [ "rand", "ring", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -8013,7 +8015,7 @@ dependencies = [ [[package]] name = "random-names" -version = "0.200.0" +version = "0.201.0" dependencies = [ "rand", ] @@ -8052,9 +8054,9 @@ checksum = "2f178674da3d005db760b30d6735a989d692da37b86337daec6f2e311223d608" [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -8184,7 +8186,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs 0.7.3", "rustls-pemfile 2.1.3", "rustls-pki-types", @@ -8428,9 +8430,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -8453,14 +8455,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = 
"0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -8540,9 +8542,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -8869,9 +8871,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -9137,7 +9139,7 @@ dependencies = [ "once_cell", "paste", "percent-encoding", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pemfile 2.1.3", "serde", "serde_json", @@ -9652,7 +9654,7 @@ dependencies = [ [[package]] name = "time-source" -version = "0.200.0" +version = "0.201.0" dependencies = [ "async-trait", "chrono", @@ -9761,7 +9763,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] @@ -9801,7 +9803,7 @@ checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -9832,7 +9834,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -9857,9 +9859,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -10055,7 +10057,7 @@ dependencies = [ [[package]] name = "tracing-perfetto" -version = "0.200.0" +version = "0.201.0" dependencies = [ "conv", "serde", @@ -10181,7 +10183,7 @@ dependencies = [ "httparse", "log", "rand", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "sha1", "thiserror", @@ -10245,15 +10247,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -10266,9 +10268,9 @@ checksum = 
"52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" @@ -10951,9 +10953,9 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" diff --git a/Cargo.toml b/Cargo.toml index 59498dc822..e1c18510ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -89,92 +89,92 @@ resolver = "2" [workspace.dependencies] # Apps -kamu-cli = { version = "0.200.0", path = "src/app/cli", default-features = false } +kamu-cli = { version = "0.201.0", path = "src/app/cli", default-features = false } # Utils -container-runtime = { version = "0.200.0", path = "src/utils/container-runtime", default-features = false } -database-common = { version = "0.200.0", path = "src/utils/database-common", default-features = false } -database-common-macros = { version = "0.200.0", path = "src/utils/database-common-macros", default-features = false } -enum-variants = { version = "0.200.0", path = "src/utils/enum-variants", default-features = false } -event-sourcing = { version = "0.200.0", path = "src/utils/event-sourcing", default-features = false } -event-sourcing-macros = { version = "0.200.0", path = "src/utils/event-sourcing-macros", default-features = false } -http-common = { version = "0.200.0", path = "src/utils/http-common", default-features = false } -internal-error = { version = "0.200.0", path = "src/utils/internal-error", default-features = false } -kamu-cli-puppet = { version = "0.200.0", path = "src/utils/kamu-cli-puppet", default-features = false } -kamu-data-utils = { version = "0.200.0", path = "src/utils/data-utils", default-features = false } -kamu-datafusion-cli = { version = "0.200.0", path = "src/utils/datafusion-cli", default-features = false } -messaging-outbox = { version = "0.200.0", path = "src/utils/messaging-outbox", default-features = false } -multiformats = { version = "0.200.0", path = "src/utils/multiformats", default-features = false } -observability = { version = "0.200.0", path = "src/utils/observability", default-features = false } -random-names = { version = "0.200.0", path = "src/utils/random-names", default-features = false } -time-source = { version = "0.200.0", path = "src/utils/time-source", default-features = false } -tracing-perfetto = { version = "0.200.0", path = "src/utils/tracing-perfetto", default-features = false } +container-runtime = { version = "0.201.0", path = "src/utils/container-runtime", default-features = false } +database-common = { version = "0.201.0", path = "src/utils/database-common", default-features = false } +database-common-macros = { version = "0.201.0", path = "src/utils/database-common-macros", default-features = false } +enum-variants = { version = "0.201.0", path = "src/utils/enum-variants", default-features = false } +event-sourcing = { version = "0.201.0", path = "src/utils/event-sourcing", default-features = false } +event-sourcing-macros = { version = "0.201.0", path = "src/utils/event-sourcing-macros", default-features = false } 
+http-common = { version = "0.201.0", path = "src/utils/http-common", default-features = false } +internal-error = { version = "0.201.0", path = "src/utils/internal-error", default-features = false } +kamu-cli-puppet = { version = "0.201.0", path = "src/utils/kamu-cli-puppet", default-features = false } +kamu-data-utils = { version = "0.201.0", path = "src/utils/data-utils", default-features = false } +kamu-datafusion-cli = { version = "0.201.0", path = "src/utils/datafusion-cli", default-features = false } +messaging-outbox = { version = "0.201.0", path = "src/utils/messaging-outbox", default-features = false } +multiformats = { version = "0.201.0", path = "src/utils/multiformats", default-features = false } +observability = { version = "0.201.0", path = "src/utils/observability", default-features = false } +random-names = { version = "0.201.0", path = "src/utils/random-names", default-features = false } +time-source = { version = "0.201.0", path = "src/utils/time-source", default-features = false } +tracing-perfetto = { version = "0.201.0", path = "src/utils/tracing-perfetto", default-features = false } # Domain -kamu-accounts = { version = "0.200.0", path = "src/domain/accounts/domain", default-features = false } -kamu-auth-rebac = { version = "0.200.0", path = "src/domain/auth-rebac/domain", default-features = false } -kamu-core = { version = "0.200.0", path = "src/domain/core", default-features = false } -kamu-datasets = { version = "0.200.0", path = "src/domain/datasets/domain", default-features = false } -kamu-flow-system = { version = "0.200.0", path = "src/domain/flow-system/domain", default-features = false } -kamu-task-system = { version = "0.200.0", path = "src/domain/task-system/domain", default-features = false } -opendatafabric = { version = "0.200.0", path = "src/domain/opendatafabric", default-features = false } +kamu-accounts = { version = "0.201.0", path = "src/domain/accounts/domain", default-features = false } +kamu-auth-rebac = { version = "0.201.0", path = "src/domain/auth-rebac/domain", default-features = false } +kamu-core = { version = "0.201.0", path = "src/domain/core", default-features = false } +kamu-datasets = { version = "0.201.0", path = "src/domain/datasets/domain", default-features = false } +kamu-flow-system = { version = "0.201.0", path = "src/domain/flow-system/domain", default-features = false } +kamu-task-system = { version = "0.201.0", path = "src/domain/task-system/domain", default-features = false } +opendatafabric = { version = "0.201.0", path = "src/domain/opendatafabric", default-features = false } # Domain service layer -kamu-accounts-services = { version = "0.200.0", path = "src/domain/accounts/services", default-features = false } -kamu-auth-rebac-services = { version = "0.200.0", path = "src/domain/auth-rebac/services", default-features = false } -kamu-datasets-services = { version = "0.200.0", path = "src/domain/datasets/services", default-features = false } -kamu-flow-system-services = { version = "0.200.0", path = "src/domain/flow-system/services", default-features = false } -kamu-task-system-services = { version = "0.200.0", path = "src/domain/task-system/services", default-features = false } +kamu-accounts-services = { version = "0.201.0", path = "src/domain/accounts/services", default-features = false } +kamu-auth-rebac-services = { version = "0.201.0", path = "src/domain/auth-rebac/services", default-features = false } +kamu-datasets-services = { version = "0.201.0", path = "src/domain/datasets/services", default-features = false } 
+kamu-flow-system-services = { version = "0.201.0", path = "src/domain/flow-system/services", default-features = false } +kamu-task-system-services = { version = "0.201.0", path = "src/domain/task-system/services", default-features = false } # Infra -kamu = { version = "0.200.0", path = "src/infra/core", default-features = false } -kamu-ingest-datafusion = { version = "0.200.0", path = "src/infra/ingest-datafusion", default-features = false } +kamu = { version = "0.201.0", path = "src/infra/core", default-features = false } +kamu-ingest-datafusion = { version = "0.201.0", path = "src/infra/ingest-datafusion", default-features = false } ## Flow System -kamu-flow-system-repo-tests = { version = "0.200.0", path = "src/infra/flow-system/repo-tests", default-features = false } -kamu-flow-system-inmem = { version = "0.200.0", path = "src/infra/flow-system/inmem", default-features = false } -kamu-flow-system-postgres = { version = "0.200.0", path = "src/infra/flow-system/postgres", default-features = false } -kamu-flow-system-sqlite = { version = "0.200.0", path = "src/infra/flow-system/sqlite", default-features = false } +kamu-flow-system-repo-tests = { version = "0.201.0", path = "src/infra/flow-system/repo-tests", default-features = false } +kamu-flow-system-inmem = { version = "0.201.0", path = "src/infra/flow-system/inmem", default-features = false } +kamu-flow-system-postgres = { version = "0.201.0", path = "src/infra/flow-system/postgres", default-features = false } +kamu-flow-system-sqlite = { version = "0.201.0", path = "src/infra/flow-system/sqlite", default-features = false } ## Accounts -kamu-accounts-inmem = { version = "0.200.0", path = "src/infra/accounts/inmem", default-features = false } -kamu-accounts-mysql = { version = "0.200.0", path = "src/infra/accounts/mysql", default-features = false } -kamu-accounts-postgres = { version = "0.200.0", path = "src/infra/accounts/postgres", default-features = false } -kamu-accounts-sqlite = { version = "0.200.0", path = "src/infra/accounts/sqlite", default-features = false } -kamu-accounts-repo-tests = { version = "0.200.0", path = "src/infra/accounts/repo-tests", default-features = false } +kamu-accounts-inmem = { version = "0.201.0", path = "src/infra/accounts/inmem", default-features = false } +kamu-accounts-mysql = { version = "0.201.0", path = "src/infra/accounts/mysql", default-features = false } +kamu-accounts-postgres = { version = "0.201.0", path = "src/infra/accounts/postgres", default-features = false } +kamu-accounts-sqlite = { version = "0.201.0", path = "src/infra/accounts/sqlite", default-features = false } +kamu-accounts-repo-tests = { version = "0.201.0", path = "src/infra/accounts/repo-tests", default-features = false } ## Datasets -kamu-datasets-inmem = { version = "0.200.0", path = "src/infra/datasets/inmem", default-features = false } -kamu-datasets-postgres = { version = "0.200.0", path = "src/infra/datasets/postgres", default-features = false } -kamu-datasets-sqlite = { version = "0.200.0", path = "src/infra/datasets/sqlite", default-features = false } -kamu-datasets-repo-tests = { version = "0.200.0", path = "src/infra/datasets/repo-tests", default-features = false } +kamu-datasets-inmem = { version = "0.201.0", path = "src/infra/datasets/inmem", default-features = false } +kamu-datasets-postgres = { version = "0.201.0", path = "src/infra/datasets/postgres", default-features = false } +kamu-datasets-sqlite = { version = "0.201.0", path = "src/infra/datasets/sqlite", default-features = false } +kamu-datasets-repo-tests 
= { version = "0.201.0", path = "src/infra/datasets/repo-tests", default-features = false } ## Task System -kamu-task-system-inmem = { version = "0.200.0", path = "src/infra/task-system/inmem", default-features = false } -kamu-task-system-postgres = { version = "0.200.0", path = "src/infra/task-system/postgres", default-features = false } -kamu-task-system-sqlite = { version = "0.200.0", path = "src/infra/task-system/sqlite", default-features = false } -kamu-task-system-repo-tests = { version = "0.200.0", path = "src/infra/task-system/repo-tests", default-features = false } +kamu-task-system-inmem = { version = "0.201.0", path = "src/infra/task-system/inmem", default-features = false } +kamu-task-system-postgres = { version = "0.201.0", path = "src/infra/task-system/postgres", default-features = false } +kamu-task-system-sqlite = { version = "0.201.0", path = "src/infra/task-system/sqlite", default-features = false } +kamu-task-system-repo-tests = { version = "0.201.0", path = "src/infra/task-system/repo-tests", default-features = false } ## ReBAC -kamu-auth-rebac-inmem = { version = "0.200.0", path = "src/infra/auth-rebac/inmem", default-features = false } -kamu-auth-rebac-repo-tests = { version = "0.200.0", path = "src/infra/auth-rebac/repo-tests", default-features = false } -kamu-auth-rebac-sqlite = { version = "0.200.0", path = "src/infra/auth-rebac/sqlite", default-features = false } +kamu-auth-rebac-inmem = { version = "0.201.0", path = "src/infra/auth-rebac/inmem", default-features = false } +kamu-auth-rebac-repo-tests = { version = "0.201.0", path = "src/infra/auth-rebac/repo-tests", default-features = false } +kamu-auth-rebac-sqlite = { version = "0.201.0", path = "src/infra/auth-rebac/sqlite", default-features = false } ## Outbox -kamu-messaging-outbox-inmem = { version = "0.200.0", path = "src/infra/messaging-outbox/inmem", default-features = false } -kamu-messaging-outbox-postgres = { version = "0.200.0", path = "src/infra/messaging-outbox/postgres", default-features = false } -kamu-messaging-outbox-sqlite = { version = "0.200.0", path = "src/infra/messaging-outbox/sqlite", default-features = false } -kamu-messaging-outbox-repo-tests = { version = "0.200.0", path = "src/infra/messaging-outbox/repo-tests", default-features = false } +kamu-messaging-outbox-inmem = { version = "0.201.0", path = "src/infra/messaging-outbox/inmem", default-features = false } +kamu-messaging-outbox-postgres = { version = "0.201.0", path = "src/infra/messaging-outbox/postgres", default-features = false } +kamu-messaging-outbox-sqlite = { version = "0.201.0", path = "src/infra/messaging-outbox/sqlite", default-features = false } +kamu-messaging-outbox-repo-tests = { version = "0.201.0", path = "src/infra/messaging-outbox/repo-tests", default-features = false } # Adapters -kamu-adapter-auth-oso = { version = "0.200.0", path = "src/adapter/auth-oso", default-features = false } -kamu-adapter-flight-sql = { version = "0.200.0", path = "src/adapter/flight-sql", default-features = false } -kamu-adapter-graphql = { version = "0.200.0", path = "src/adapter/graphql", default-features = false } -kamu-adapter-http = { version = "0.200.0", path = "src/adapter/http", default-features = false } -kamu-adapter-odata = { version = "0.200.0", path = "src/adapter/odata", default-features = false } -kamu-adapter-oauth = { version = "0.200.0", path = "src/adapter/oauth", default-features = false } +kamu-adapter-auth-oso = { version = "0.201.0", path = "src/adapter/auth-oso", default-features = false } 
+kamu-adapter-flight-sql = { version = "0.201.0", path = "src/adapter/flight-sql", default-features = false } +kamu-adapter-graphql = { version = "0.201.0", path = "src/adapter/graphql", default-features = false } +kamu-adapter-http = { version = "0.201.0", path = "src/adapter/http", default-features = false } +kamu-adapter-odata = { version = "0.201.0", path = "src/adapter/odata", default-features = false } +kamu-adapter-oauth = { version = "0.201.0", path = "src/adapter/oauth", default-features = false } # E2E -kamu-cli-e2e-common = { version = "0.200.0", path = "src/e2e/app/cli/common", default-features = false } -kamu-cli-e2e-common-macros = { version = "0.200.0", path = "src/e2e/app/cli/common-macros", default-features = false } -kamu-cli-e2e-repo-tests = { version = "0.200.0", path = "src/e2e/app/cli/repo-tests", default-features = false } +kamu-cli-e2e-common = { version = "0.201.0", path = "src/e2e/app/cli/common", default-features = false } +kamu-cli-e2e-common-macros = { version = "0.201.0", path = "src/e2e/app/cli/common-macros", default-features = false } +kamu-cli-e2e-repo-tests = { version = "0.201.0", path = "src/e2e/app/cli/repo-tests", default-features = false } [workspace.package] -version = "0.200.0" +version = "0.201.0" edition = "2021" homepage = "https://github.com/kamu-data/kamu-cli" repository = "https://github.com/kamu-data/kamu-cli" diff --git a/LICENSE.txt b/LICENSE.txt index 75b771f25f..388a72f191 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -11,7 +11,7 @@ Business Source License 1.1 Licensor: Kamu Data, Inc. -Licensed Work: Kamu CLI Version 0.200.0 +Licensed Work: Kamu CLI Version 0.201.0 The Licensed Work is © 2023 Kamu Data, Inc. Additional Use Grant: You may use the Licensed Work for any purpose, @@ -24,7 +24,7 @@ Additional Use Grant: You may use the Licensed Work for any purpose, Licensed Work where data or transformations are controlled by such third parties. -Change Date: 2028-09-13 +Change Date: 2028-09-18 Change License: Apache License, Version 2.0 diff --git a/migrations/postgres/20240917133430_flow_activation_time.sql b/migrations/postgres/20240917133430_flow_activation_time.sql new file mode 100644 index 0000000000..2cd513f244 --- /dev/null +++ b/migrations/postgres/20240917133430_flow_activation_time.sql @@ -0,0 +1,7 @@ + +/* ------------------------------ */ + +ALTER TABLE flows + ADD COLUMN scheduled_for_activation_at TIMESTAMPTZ; + +/* ------------------------------ */ diff --git a/migrations/sqlite/20240917133527_flow_activation_time.sql b/migrations/sqlite/20240917133527_flow_activation_time.sql new file mode 100644 index 0000000000..2cd513f244 --- /dev/null +++ b/migrations/sqlite/20240917133527_flow_activation_time.sql @@ -0,0 +1,7 @@ + +/* ------------------------------ */ + +ALTER TABLE flows + ADD COLUMN scheduled_for_activation_at TIMESTAMPTZ; + +/* ------------------------------ */ diff --git a/resources/schema.gql b/resources/schema.gql index e167b597a2..4c6c3b3b5b 100644 --- a/resources/schema.gql +++ b/resources/schema.gql @@ -1080,6 +1080,12 @@ type FlowEventInitiated implements FlowEvent { trigger: FlowTrigger! } +type FlowEventScheduledForActivation implements FlowEvent { + eventId: EventID! + eventTime: DateTime! + scheduledForActivationAt: DateTime! +} + type FlowEventStartConditionUpdated implements FlowEvent { eventId: EventID! eventTime: DateTime! 
diff --git a/src/adapter/graphql/src/queries/flows/flow_event.rs b/src/adapter/graphql/src/queries/flows/flow_event.rs index eb6e6cfc0f..8f0e8775a7 100644 --- a/src/adapter/graphql/src/queries/flows/flow_event.rs +++ b/src/adapter/graphql/src/queries/flows/flow_event.rs @@ -27,6 +27,8 @@ pub enum FlowEvent { Initiated(FlowEventInitiated), /// Start condition defined StartConditionUpdated(FlowEventStartConditionUpdated), + /// Flow scheduled for activation + ScheduledForActivation(FlowEventScheduledForActivation), /// Secondary trigger added TriggerAdded(FlowEventTriggerAdded), /// Associated task has changed status @@ -62,6 +64,13 @@ impl FlowEvent { fs::FlowEvent::TriggerAdded(e) => { Self::TriggerAdded(FlowEventTriggerAdded::build(event_id, e, ctx).await?) } + fs::FlowEvent::ScheduledForActivation(e) => { + Self::ScheduledForActivation(FlowEventScheduledForActivation::new( + event_id, + e.event_time, + e.scheduled_for_activation_at, + )) + } fs::FlowEvent::TaskScheduled(e) => Self::TaskChanged(FlowEventTaskChanged::new( event_id, e.event_time, @@ -156,6 +165,32 @@ impl FlowEventTriggerAdded { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#[derive(SimpleObject)] +#[graphql(complex)] +pub struct FlowEventScheduledForActivation { + event_id: EventID, + event_time: DateTime, + scheduled_for_activation_at: DateTime, +} + +#[ComplexObject] +impl FlowEventScheduledForActivation { + #[graphql(skip)] + fn new( + event_id: evs::EventID, + event_time: DateTime, + scheduled_for_activation_at: DateTime, + ) -> Self { + Self { + event_id: event_id.into(), + event_time, + scheduled_for_activation_at, + } + } +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + #[derive(SimpleObject)] #[graphql(complex)] pub struct FlowEventTaskChanged { diff --git a/src/adapter/graphql/tests/tests/test_gql_account_flow_configs.rs b/src/adapter/graphql/tests/tests/test_gql_account_flow_configs.rs index 878b2dc791..431aedd8b8 100644 --- a/src/adapter/graphql/tests/tests/test_gql_account_flow_configs.rs +++ b/src/adapter/graphql/tests/tests/test_gql_account_flow_configs.rs @@ -680,8 +680,8 @@ impl FlowConfigHarness { .add::() .add::() .add_value(FlowExecutorConfig::new( - Duration::try_seconds(1).unwrap(), - Duration::try_minutes(1).unwrap(), + Duration::seconds(1), + Duration::minutes(1), )) .add::() .add::() diff --git a/src/adapter/graphql/tests/tests/test_gql_dataset_flow_runs.rs b/src/adapter/graphql/tests/tests/test_gql_dataset_flow_runs.rs index 5de916d135..947ffb3eb7 100644 --- a/src/adapter/graphql/tests/tests/test_gql_dataset_flow_runs.rs +++ b/src/adapter/graphql/tests/tests/test_gql_dataset_flow_runs.rs @@ -193,9 +193,7 @@ async fn test_trigger_ingest_root_dataset() { }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("0", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]); @@ -269,9 +267,7 @@ async fn test_trigger_ingest_root_dataset() { }) ); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; @@ -343,9 +339,7 @@ 
async fn test_trigger_ingest_root_dataset() { }) ); - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_completed( flow_task_id, @@ -499,22 +493,16 @@ async fn test_trigger_reset_root_dataset_flow() { }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("0", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_completed( flow_task_id, @@ -823,22 +811,16 @@ async fn test_trigger_execute_transform_derived_dataset() { }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("0", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_completed( flow_task_id, @@ -1047,9 +1029,7 @@ async fn test_trigger_compaction_root_dataset() { }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("0", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]); @@ -1123,9 +1103,7 @@ async fn test_trigger_compaction_root_dataset() { }) ); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; @@ -1197,9 +1175,7 @@ async fn test_trigger_compaction_root_dataset() { }) ); - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let new_head = Multihash::from_digest_sha3_256(b"new-slice"); harness @@ -2634,22 +2610,26 @@ async fn test_history_of_completed_flow() { } }, { - "__typename": "FlowEventTriggerAdded", + "__typename": "FlowEventScheduledForActivation", "eventId": "2", + }, + { + "__typename": "FlowEventTriggerAdded", + "eventId": "3", "trigger": { "__typename": "FlowTriggerAutoPolling" } }, { "__typename": "FlowEventStartConditionUpdated", - 
"eventId": "3", + "eventId": "4", "startCondition": { "__typename" : "FlowStartConditionExecutor" } }, { "__typename": "FlowEventTaskChanged", - "eventId": "4", + "eventId": "5", "taskId": "0", "taskStatus": "QUEUED", "task": { @@ -2658,7 +2638,7 @@ async fn test_history_of_completed_flow() { }, { "__typename": "FlowEventTaskChanged", - "eventId": "5", + "eventId": "6", "taskId": "0", "taskStatus": "RUNNING", "task": { @@ -2667,7 +2647,7 @@ async fn test_history_of_completed_flow() { }, { "__typename": "FlowEventTaskChanged", - "eventId": "6", + "eventId": "7", "taskId": "0", "taskStatus": "FINISHED", "task": { @@ -2746,21 +2726,15 @@ async fn test_execute_transfrom_flow_error_after_compaction() { } }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("0", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let new_head = Multihash::from_digest_sha3_256(b"new-slice"); harness @@ -2899,21 +2873,15 @@ async fn test_execute_transfrom_flow_error_after_compaction() { }) ); - let schedule_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let schedule_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); let flow_task_id = harness.mimic_flow_scheduled("1", schedule_time).await; let flow_task_metadata = TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]); - let running_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let running_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_running(flow_task_id, flow_task_metadata.clone(), running_time) .await; - let complete_time = Utc::now() - .duration_round(Duration::try_seconds(1).unwrap()) - .unwrap(); + let complete_time = Utc::now().duration_round(Duration::seconds(1)).unwrap(); harness .mimic_task_completed( flow_task_id, @@ -3228,8 +3196,8 @@ impl FlowRunsHarness { .add::() .add::() .add_value(FlowExecutorConfig::new( - Duration::try_seconds(1).unwrap(), - Duration::try_minutes(1).unwrap(), + Duration::seconds(1), + Duration::minutes(1), )) .add::() .add::() diff --git a/src/adapter/http/src/upload/upload_service_s3.rs b/src/adapter/http/src/upload/upload_service_s3.rs index 246ff12950..00d585cc1a 100644 --- a/src/adapter/http/src/upload/upload_service_s3.rs +++ b/src/adapter/http/src/upload/upload_service_s3.rs @@ -70,12 +70,7 @@ impl UploadService for UploadServiceS3 { let file_key = self.make_file_key(account_id, &upload_id, &file_name); let presigned_conf = PresigningConfig::builder() - .expires_in( - chrono::Duration::try_seconds(3600) - .unwrap() - .to_std() - .unwrap(), - ) + .expires_in(chrono::Duration::seconds(3600).to_std().unwrap()) .build() .expect("Invalid presigning config"); diff --git a/src/app/cli/src/app.rs b/src/app/cli/src/app.rs index 050163e15b..8bc22d6822 100644 --- a/src/app/cli/src/app.rs +++ b/src/app/cli/src/app.rs @@ -408,8 +408,8 @@ 
pub fn configure_base_catalog( kamu_task_system_services::register_dependencies(&mut b); b.add_value(kamu_flow_system_inmem::domain::FlowExecutorConfig::new( - chrono::Duration::try_seconds(1).unwrap(), - chrono::Duration::try_minutes(1).unwrap(), + chrono::Duration::seconds(1), + chrono::Duration::minutes(1), )); kamu_flow_system_services::register_dependencies(&mut b); diff --git a/src/app/cli/src/services/gc_service.rs b/src/app/cli/src/services/gc_service.rs index b56c2b2a62..29d0e4e982 100644 --- a/src/app/cli/src/services/gc_service.rs +++ b/src/app/cli/src/services/gc_service.rs @@ -58,7 +58,7 @@ impl GcService { pub fn evict_cache(&self) -> Result { // TODO: Make const after https://github.com/chronotope/chrono/issues/309 // Or make into a config option - let eviction_threshold: Duration = Duration::try_hours(24).unwrap(); + let eviction_threshold: Duration = Duration::hours(24); let now = Utc::now(); let mut entries_freed = 0; let mut bytes_freed = 0; diff --git a/src/domain/flow-system/domain/src/aggregates/flow/flow.rs b/src/domain/flow-system/domain/src/aggregates/flow/flow.rs index 192f30290d..7ff9da2d00 100644 --- a/src/domain/flow-system/domain/src/aggregates/flow/flow.rs +++ b/src/domain/flow-system/domain/src/aggregates/flow/flow.rs @@ -80,6 +80,21 @@ impl Flow { } } + /// Indicate flow is scheduled for activation at particular time + pub fn schedule_for_activation( + &mut self, + now: DateTime, + scheduled_for_activation_at: DateTime, + ) -> Result<(), ProjectionError> { + let event = FlowEventScheduledForActivation { + event_time: now, + flow_id: self.flow_id, + scheduled_for_activation_at, + }; + self.apply(event)?; + Ok(()) + } + /// Attaches a scheduled task pub fn on_task_scheduled( &mut self, diff --git a/src/domain/flow-system/domain/src/entities/flow/flow_event.rs b/src/domain/flow-system/domain/src/entities/flow/flow_event.rs index 022e91c643..9881a59116 100644 --- a/src/domain/flow-system/domain/src/entities/flow/flow_event.rs +++ b/src/domain/flow-system/domain/src/entities/flow/flow_event.rs @@ -24,6 +24,8 @@ pub enum FlowEvent { StartConditionUpdated(FlowEventStartConditionUpdated), /// Secondary trigger added TriggerAdded(FlowEventTriggerAdded), + /// Scheduled for activation at a particular time + ScheduledForActivation(FlowEventScheduledForActivation), /// Scheduled/Rescheduled a task TaskScheduled(FlowEventTaskScheduled), /// Task running @@ -40,6 +42,7 @@ impl FlowEvent { FlowEvent::Initiated(_) => "FlowEventInitiated", FlowEvent::StartConditionUpdated(_) => "FlowEventStartConditionUpdated", FlowEvent::TriggerAdded(_) => "FlowEventTriggerAdded", + FlowEvent::ScheduledForActivation(_) => "FlowEventScheduledForActivation", FlowEvent::TaskScheduled(_) => "FlowEventTaskScheduled", FlowEvent::TaskRunning(_) => "FlowEventTaskRunning", FlowEvent::TaskFinished(_) => "FlowEventTaskFinished", @@ -80,6 +83,15 @@ pub struct FlowEventTriggerAdded { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct FlowEventScheduledForActivation { + pub event_time: DateTime, + pub flow_id: FlowID, + pub scheduled_for_activation_at: DateTime, +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct FlowEventTaskScheduled { pub event_time: DateTime, @@ -122,6 +134,7 @@ impl FlowEvent { 
FlowEvent::Initiated(e) => e.flow_id, FlowEvent::StartConditionUpdated(e) => e.flow_id, FlowEvent::TriggerAdded(e) => e.flow_id, + FlowEvent::ScheduledForActivation(e) => e.flow_id, FlowEvent::TaskScheduled(e) => e.flow_id, FlowEvent::TaskRunning(e) => e.flow_id, FlowEvent::TaskFinished(e) => e.flow_id, @@ -134,6 +147,7 @@ impl FlowEvent { FlowEvent::Initiated(e) => e.event_time, FlowEvent::StartConditionUpdated(e) => e.event_time, FlowEvent::TriggerAdded(e) => e.event_time, + FlowEvent::ScheduledForActivation(e) => e.event_time, FlowEvent::TaskScheduled(e) => e.event_time, FlowEvent::TaskRunning(e) => e.event_time, FlowEvent::TaskFinished(e) => e.event_time, @@ -146,6 +160,7 @@ impl FlowEvent { FlowEvent::Initiated(_) => Some(FlowStatus::Waiting), FlowEvent::StartConditionUpdated(_) | FlowEvent::TriggerAdded(_) + | FlowEvent::ScheduledForActivation(_) | FlowEvent::TaskScheduled(_) => None, FlowEvent::TaskRunning(_) => Some(FlowStatus::Running), FlowEvent::TaskFinished(_) | FlowEvent::Aborted(_) => Some(FlowStatus::Finished), @@ -160,6 +175,9 @@ impl_enum_variant!(FlowEvent::StartConditionUpdated( FlowEventStartConditionUpdated )); impl_enum_variant!(FlowEvent::TriggerAdded(FlowEventTriggerAdded)); +impl_enum_variant!(FlowEvent::ScheduledForActivation( + FlowEventScheduledForActivation +)); impl_enum_variant!(FlowEvent::TaskScheduled(FlowEventTaskScheduled)); impl_enum_variant!(FlowEvent::TaskRunning(FlowEventTaskRunning)); impl_enum_variant!(FlowEvent::TaskFinished(FlowEventTaskFinished)); diff --git a/src/domain/flow-system/domain/src/entities/flow/flow_state.rs b/src/domain/flow-system/domain/src/entities/flow/flow_state.rs index 4bb2d9b95f..0923052e58 100644 --- a/src/domain/flow-system/domain/src/entities/flow/flow_state.rs +++ b/src/domain/flow-system/domain/src/entities/flow/flow_state.rs @@ -37,6 +37,8 @@ pub struct FlowState { #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct FlowTimingRecords { + /// Flow scheduled and will be activated at time + pub scheduled_for_activation_at: Option>, /// Task scheduled and waiting for execution since time pub awaiting_executor_since: Option>, /// Started running at time @@ -63,17 +65,6 @@ impl FlowState { } } - // Extract wakeup time - pub fn wake_up_at(&self) -> Option> { - if self.status() == FlowStatus::Waiting { - if let Some(start_condition) = self.start_condition.as_ref() { - return start_condition.wake_up_at(); - } - } - - None - } - pub fn try_result_as_ref(&self) -> Option<&FlowResult> { self.outcome .as_ref() @@ -106,6 +97,7 @@ impl Projection for FlowState { triggers: vec![trigger], start_condition: None, timing: FlowTimingRecords { + scheduled_for_activation_at: None, awaiting_executor_since: None, running_since: None, finished_at: None, @@ -125,7 +117,7 @@ impl Projection for FlowState { start_condition, .. }) => { - if s.outcome.is_some() || s.timing.running_since.is_some() { + if s.outcome.is_some() || s.timing.awaiting_executor_since.is_some() { Err(ProjectionError::new(Some(s), event)) } else { Ok(FlowState { @@ -143,12 +135,28 @@ impl Projection for FlowState { Ok(FlowState { triggers, ..s }) } } + E::ScheduledForActivation(FlowEventScheduledForActivation { + scheduled_for_activation_at, + .. 
+ }) => { + if s.outcome.is_some() || s.timing.awaiting_executor_since.is_some() { + Err(ProjectionError::new(Some(s), event)) + } else { + Ok(FlowState { + timing: FlowTimingRecords { + scheduled_for_activation_at: Some(scheduled_for_activation_at), + ..s.timing + }, + ..s + }) + } + } E::TaskScheduled(FlowEventTaskScheduled { event_time, task_id, .. }) => { - if s.outcome.is_some() { + if s.outcome.is_some() || s.timing.scheduled_for_activation_at.is_none() { Err(ProjectionError::new(Some(s), event)) } else { let mut task_ids = s.task_ids; diff --git a/src/domain/flow-system/domain/src/entities/shared/transform_rule.rs b/src/domain/flow-system/domain/src/entities/shared/transform_rule.rs index 83f0224d1a..e4d5cec0b7 100644 --- a/src/domain/flow-system/domain/src/entities/shared/transform_rule.rs +++ b/src/domain/flow-system/domain/src/entities/shared/transform_rule.rs @@ -32,12 +32,12 @@ impl TransformRule { return Err(TransformRuleValidationError::MinRecordsToAwaitNotPositive); } - let lower_interval_bound = Duration::try_seconds(0).unwrap(); + let lower_interval_bound = Duration::seconds(0); if lower_interval_bound >= max_batching_interval { return Err(TransformRuleValidationError::MinIntervalNotPositive); } - let upper_interval_bound = Duration::try_hours(Self::MAX_BATCHING_INTERVAL_HOURS).unwrap(); + let upper_interval_bound = Duration::hours(Self::MAX_BATCHING_INTERVAL_HOURS); if max_batching_interval > upper_interval_bound { return Err(TransformRuleValidationError::MaxIntervalAboveLimit); } @@ -88,24 +88,18 @@ mod tests { #[test] fn test_good_transform_rule() { + assert_matches!(TransformRule::new_checked(1, TimeDelta::minutes(15)), Ok(_)); assert_matches!( - TransformRule::new_checked(1, TimeDelta::try_minutes(15).unwrap()), - Ok(_) - ); - assert_matches!( - TransformRule::new_checked(1_000_000, TimeDelta::try_hours(3).unwrap()), - Ok(_) - ); - assert_matches!( - TransformRule::new_checked(1, TimeDelta::try_hours(24).unwrap()), + TransformRule::new_checked(1_000_000, TimeDelta::hours(3)), Ok(_) ); + assert_matches!(TransformRule::new_checked(1, TimeDelta::hours(24)), Ok(_)); } #[test] fn test_non_positive_min_records() { assert_matches!( - TransformRule::new_checked(0, TimeDelta::try_minutes(15).unwrap()), + TransformRule::new_checked(0, TimeDelta::minutes(15)), Err(TransformRuleValidationError::MinRecordsToAwaitNotPositive) ); } @@ -113,11 +107,11 @@ mod tests { #[test] fn test_non_positive_max_interval() { assert_matches!( - TransformRule::new_checked(1, TimeDelta::try_minutes(0).unwrap()), + TransformRule::new_checked(1, TimeDelta::minutes(0)), Err(TransformRuleValidationError::MinIntervalNotPositive) ); assert_matches!( - TransformRule::new_checked(1, TimeDelta::try_minutes(-1).unwrap()), + TransformRule::new_checked(1, TimeDelta::minutes(-1)), Err(TransformRuleValidationError::MinIntervalNotPositive) ); } diff --git a/src/domain/flow-system/domain/src/flow_messages_types.rs b/src/domain/flow-system/domain/src/flow_messages_types.rs index 79dfa10148..553e2e0f69 100644 --- a/src/domain/flow-system/domain/src/flow_messages_types.rs +++ b/src/domain/flow-system/domain/src/flow_messages_types.rs @@ -47,7 +47,7 @@ pub enum FlowExecutorUpdateDetails { #[derive(Debug, Clone, Serialize, Deserialize)] pub enum FlowProgressMessage { - Enqueued(FlowProgressMessageEnqueued), + Scheduled(FlowProgressMessageScheduled), Running(FlowProgressMessageRunning), Finished(FlowProgressMessageFinished), Cancelled(FlowProgressMessageCancelled), @@ -56,15 +56,15 @@ pub enum FlowProgressMessage { impl 
Message for FlowProgressMessage {} impl FlowProgressMessage { - pub fn enqueued( + pub fn scheduled( event_time: DateTime, flow_id: FlowID, - activate_at: DateTime, + scheduled_for_activation_at: DateTime, ) -> Self { - Self::Enqueued(FlowProgressMessageEnqueued { + Self::Scheduled(FlowProgressMessageScheduled { event_time, flow_id, - activate_at, + scheduled_for_activation_at, }) } @@ -92,10 +92,10 @@ impl FlowProgressMessage { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct FlowProgressMessageEnqueued { +pub struct FlowProgressMessageScheduled { pub event_time: DateTime, pub flow_id: FlowID, - pub activate_at: DateTime, + pub scheduled_for_activation_at: DateTime, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] diff --git a/src/domain/flow-system/domain/src/repos/flow/flow_event_store.rs b/src/domain/flow-system/domain/src/repos/flow/flow_event_store.rs index 31253f672a..70be43e09f 100644 --- a/src/domain/flow-system/domain/src/repos/flow/flow_event_store.rs +++ b/src/domain/flow-system/domain/src/repos/flow/flow_event_store.rs @@ -42,6 +42,15 @@ pub trait FlowEventStore: EventStore { flow_type: SystemFlowType, ) -> Result; + /// Returns nearest time when one or more flows are scheduled for activation + async fn nearest_flow_activation_moment(&self) -> Result>, InternalError>; + + /// Returns flows scheduled for activation at the given time + async fn get_flows_scheduled_for_activation_at( + &self, + scheduled_for_activation_at: DateTime, + ) -> Result, InternalError>; + /// Returns IDs of the flows associated with the specified /// dataset in reverse chronological order based on creation time. /// Applies filters/pagination, if specified diff --git a/src/domain/flow-system/domain/src/services/flow/flow_time_wheel_service.rs b/src/domain/flow-system/domain/src/services/flow/flow_time_wheel_service.rs deleted file mode 100644 index 8dda3067b0..0000000000 --- a/src/domain/flow-system/domain/src/services/flow/flow_time_wheel_service.rs +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Kamu Data, Inc. and contributors. All rights reserved. -// -// Use of this software is governed by the Business Source License -// included in the LICENSE file. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0. 
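The two new `FlowEventStore` queries above, `nearest_flow_activation_moment` and `get_flows_scheduled_for_activation_at`, take over the peek/take role of the time wheel that is deleted below. The following is a minimal editorial sketch of how an in-memory store could back these queries with a pair of mirrored lookup tables; the struct, field, and method names (and the stub `FlowID`) are illustrative assumptions, not the actual in-memory event store code from this change set.

use std::collections::{BTreeMap, HashMap};

use chrono::{DateTime, Utc};

// Stand-in for the flow identifier type from the flow-system domain crate
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct FlowID(u64);

#[derive(Default)]
struct ActivationIndex {
    // activation time -> flows planned for exactly that moment
    flows_by_time: BTreeMap<DateTime<Utc>, Vec<FlowID>>,
    // flow -> its currently planned activation time (for rescheduling)
    time_by_flow: HashMap<FlowID, DateTime<Utc>>,
}

impl ActivationIndex {
    fn schedule(&mut self, flow_id: FlowID, at: DateTime<Utc>) {
        // Drop the previous entry for the same flow, if any
        if let Some(prev) = self.time_by_flow.insert(flow_id, at) {
            if let Some(ids) = self.flows_by_time.get_mut(&prev) {
                ids.retain(|id| *id != flow_id);
                if ids.is_empty() {
                    self.flows_by_time.remove(&prev);
                }
            }
        }
        self.flows_by_time.entry(at).or_default().push(flow_id);
    }

    // Counterpart of `nearest_flow_activation_moment`: smallest key of the ordered map
    fn nearest_activation_moment(&self) -> Option<DateTime<Utc>> {
        self.flows_by_time.keys().next().copied()
    }

    // Counterpart of `get_flows_scheduled_for_activation_at`
    fn flows_scheduled_at(&self, at: DateTime<Utc>) -> Vec<FlowID> {
        self.flows_by_time.get(&at).cloned().unwrap_or_default()
    }
}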
- -use chrono::{DateTime, Utc}; -use thiserror::Error; - -use crate::FlowID; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -pub trait FlowTimeWheelService: Send + Sync { - fn nearest_activation_moment(&self) -> Option>; - - fn take_nearest_planned_flows(&self) -> Vec; - - fn get_planned_flow_activation_time(&self, flow_id: FlowID) -> Option>; -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#[derive(Error, Debug)] -pub enum TimeWheelCancelActivationError { - #[error(transparent)] - FlowNotPlanned(TimeWheelFlowNotPlannedError), -} - -#[derive(Error, Debug)] -#[error("Flow '{flow_id}' not found planned in the time wheel")] -pub struct TimeWheelFlowNotPlannedError { - pub flow_id: FlowID, -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/domain/flow-system/domain/src/services/flow/mod.rs b/src/domain/flow-system/domain/src/services/flow/mod.rs index bccb42dc2e..ed3db92ff1 100644 --- a/src/domain/flow-system/domain/src/services/flow/mod.rs +++ b/src/domain/flow-system/domain/src/services/flow/mod.rs @@ -9,8 +9,6 @@ mod flow_query_service; mod flow_service_test_driver; -mod flow_time_wheel_service; pub use flow_query_service::*; pub use flow_service_test_driver::*; -pub use flow_time_wheel_service::*; diff --git a/src/domain/flow-system/services/src/dependencies.rs b/src/domain/flow-system/services/src/dependencies.rs index dfa020574f..da47b5d8a1 100644 --- a/src/domain/flow-system/services/src/dependencies.rs +++ b/src/domain/flow-system/services/src/dependencies.rs @@ -17,10 +17,9 @@ pub fn register_dependencies(catalog_builder: &mut CatalogBuilder) { catalog_builder.add::(); catalog_builder.add::(); catalog_builder.add::(); - catalog_builder.add::(); catalog_builder.add::(); - catalog_builder.add::(); + catalog_builder.add::(); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/domain/flow-system/services/src/flow/flow_executor_impl.rs b/src/domain/flow-system/services/src/flow/flow_executor_impl.rs index af0adba894..341c5f501e 100644 --- a/src/domain/flow-system/services/src/flow/flow_executor_impl.rs +++ b/src/domain/flow-system/services/src/flow/flow_executor_impl.rs @@ -32,7 +32,7 @@ use time_source::SystemTimeSource; use crate::{ FlowAbortHelper, - FlowEnqueueHelper, + FlowSchedulingHelper, MESSAGE_CONSUMER_KAMU_FLOW_EXECUTOR, MESSAGE_PRODUCER_KAMU_FLOW_CONFIGURATION_SERVICE, MESSAGE_PRODUCER_KAMU_FLOW_EXECUTOR, @@ -43,7 +43,6 @@ use crate::{ pub struct FlowExecutorImpl { catalog: Catalog, - flow_time_wheel_service: Arc, time_source: Arc, executor_config: Arc, } @@ -70,13 +69,11 @@ pub struct FlowExecutorImpl { impl FlowExecutorImpl { pub fn new( catalog: Catalog, - flow_time_wheel_service: Arc, time_source: Arc, executor_config: Arc, ) -> Self { Self { catalog, - flow_time_wheel_service, time_source, executor_config, } @@ -88,7 +85,7 @@ impl FlowExecutorImpl { start_time: DateTime, ) -> Result<(), InternalError> { // Recover already scheduled flows after server restart - self.recover_time_wheel(&transaction_catalog, start_time) + self.recover_waiting_flows(&transaction_catalog, start_time) .await?; // Restore auto polling flows: @@ -113,15 +110,14 @@ impl FlowExecutorImpl { } #[tracing::instrument(level = "debug", skip_all)] - async fn 
recover_time_wheel( + async fn recover_waiting_flows( &self, target_catalog: &Catalog, start_time: DateTime, ) -> Result<(), InternalError> { // Extract necessary dependencies let flow_event_store = target_catalog.get_one::().unwrap(); - let enqueue_helper = target_catalog.get_one::().unwrap(); - let outbox = target_catalog.get_one::().unwrap(); + let scheduling_helper = target_catalog.get_one::().unwrap(); // How many waiting flows do we have? let waiting_filters = AllFlowFilters { @@ -155,39 +151,18 @@ impl FlowExecutorImpl { .await .int_err()?; - // We are not interested in flows with scheduled tasks, - // as restoring these will be handled by TaskExecutor. - if let Some(start_condition) = &flow.start_condition { - // We have to recover wakeup for scheduled/throttling condition - if let Some(wakeup_time) = start_condition.wake_up_at() { - let mut activation_time = wakeup_time; - if activation_time < start_time { - activation_time = start_time; - } - outbox - .post_message( - MESSAGE_PRODUCER_KAMU_FLOW_PROGRESS_SERVICE, - FlowProgressMessage::enqueued( - start_time, - *waiting_flow_id, - activation_time, - ), - ) - .await?; - } - // and we also need to re-evaluate the batching condition - else if let FlowStartCondition::Batching(b) = start_condition { - enqueue_helper - .trigger_flow_common( - &flow.flow_key, - FlowTrigger::AutoPolling(FlowTriggerAutoPolling { - trigger_time: start_time, - }), - FlowTriggerContext::Batching(b.active_transform_rule), - None, - ) - .await?; - } + // We need to re-evaluate batching conditions only + if let Some(FlowStartCondition::Batching(b)) = &flow.start_condition { + scheduling_helper + .trigger_flow_common( + &flow.flow_key, + FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_time, + }), + FlowTriggerContext::Batching(b.active_transform_rule), + None, + ) + .await?; } } @@ -219,7 +194,7 @@ impl FlowExecutorImpl { .into_iter() .partition(|config| matches!(config.rule, FlowConfigurationRule::Schedule(_))); - let enqueue_helper = target_catalog.get_one::().unwrap(); + let scheduling_helper = target_catalog.get_one::().unwrap(); // Activate all configs, ensuring schedule configs precedes non-schedule configs // (this i.e. forces all root datasets to be updated earlier than the derived) @@ -235,7 +210,7 @@ impl FlowExecutorImpl { .try_get_pending_flow(&enabled_config.flow_key) .await?; if maybe_pending_flow_id.is_none() { - enqueue_helper + scheduling_helper .activate_flow_configuration( start_time, enabled_config.flow_key, @@ -249,61 +224,78 @@ impl FlowExecutorImpl { } #[transactional_method] - async fn run_flows_current_timeslot( - &self, - timeslot_time: DateTime, - ) -> Result<(), InternalError> { - let planned_flow_ids: Vec<_> = self.flow_time_wheel_service.take_nearest_planned_flows(); + async fn run_flows_current_timeslot(&self) -> Result<(), InternalError> { + // Do we have a timeslot scheduled? + let flow_event_store = transaction_catalog.get_one::().unwrap(); + let maybe_nearest_flow_activation_moment = + flow_event_store.nearest_flow_activation_moment().await?; + + // Is it time to execute it yet? 
+ let current_time = self.time_source.now(); + if let Some(nearest_flow_activation_moment) = maybe_nearest_flow_activation_moment + && nearest_flow_activation_moment <= current_time + { + let activation_span = tracing::info_span!("FlowExecutor::activation"); + let _ = activation_span.enter(); - let mut planned_task_futures = Vec::new(); - for planned_flow_id in planned_flow_ids { - let transaction_catalog = transaction_catalog.clone(); - let flow_event_store = transaction_catalog.get_one::().unwrap(); + let planned_flow_ids: Vec<_> = flow_event_store + .get_flows_scheduled_for_activation_at(nearest_flow_activation_moment) + .await?; - planned_task_futures.push(async move { - let mut flow = Flow::load(planned_flow_id, flow_event_store.as_ref()) - .await - .int_err()?; + let mut planned_task_futures = Vec::new(); + for planned_flow_id in planned_flow_ids { + let transaction_catalog = transaction_catalog.clone(); + let flow_event_store = flow_event_store.clone(); - if flow.can_schedule() { - self.schedule_flow_task(transaction_catalog, &mut flow, timeslot_time) - .await?; - } else { - tracing::warn!( - flow_id = %planned_flow_id, - flow_status = %flow.status(), - "Skipped flow scheduling as no longer relevant" - ); - } + planned_task_futures.push(async move { + let mut flow = Flow::load(planned_flow_id, flow_event_store.as_ref()) + .await + .int_err()?; - Ok(()) - }); - } + if flow.can_schedule() { + self.schedule_flow_task( + transaction_catalog, + &mut flow, + nearest_flow_activation_moment, + ) + .await?; + } else { + tracing::warn!( + flow_id = %planned_flow_id, + flow_status = %flow.status(), + "Skipped flow scheduling as no longer relevant" + ); + } - let results = futures::future::join_all(planned_task_futures).await; - results - .into_iter() - .filter(Result::is_err) - .map(|e| e.err().unwrap()) - .for_each(|e: InternalError| { - tracing::error!( - error = ?e, - error_msg = %e, - "Scheduling flow failed" - ); - }); + Ok(()) + }); + } - // Publish progress event - let outbox = transaction_catalog.get_one::().unwrap(); - outbox - .post_message( - MESSAGE_PRODUCER_KAMU_FLOW_EXECUTOR, - FlowExecutorUpdatedMessage { - update_time: timeslot_time, - update_details: FlowExecutorUpdateDetails::ExecutedTimeslot, - }, - ) - .await?; + let results = futures::future::join_all(planned_task_futures).await; + results + .into_iter() + .filter(Result::is_err) + .map(|e| e.err().unwrap()) + .for_each(|e: InternalError| { + tracing::error!( + error = ?e, + error_msg = %e, + "Scheduling flow failed" + ); + }); + + // Publish progress event + let outbox = transaction_catalog.get_one::().unwrap(); + outbox + .post_message( + MESSAGE_PRODUCER_KAMU_FLOW_EXECUTOR, + FlowExecutorUpdatedMessage { + update_time: nearest_flow_activation_moment, + update_details: FlowExecutorUpdateDetails::ExecutedTimeslot, + }, + ) + .await?; + } Ok(()) } @@ -444,23 +436,8 @@ impl FlowExecutor for FlowExecutorImpl { let tick_span = tracing::trace_span!("FlowExecutor::tick"); let _ = tick_span.enter(); - let current_time = self.time_source.now(); - - // Do we have a timeslot scheduled? - let maybe_nearest_activation_time = - self.flow_time_wheel_service.nearest_activation_moment(); - - // Is it time to execute it yet? - if let Some(nearest_activation_time) = maybe_nearest_activation_time - && nearest_activation_time <= current_time - { - let activation_span = tracing::info_span!("FlowExecutor::activation"); - let _ = activation_span.enter(); - - // Run scheduling for current time slot. 
Should not throw any errors - self.run_flows_current_timeslot(nearest_activation_time) - .await?; - } + // Run scheduling for current time slot + self.run_flows_current_timeslot().await?; self.time_source .sleep(self.executor_config.awaiting_step) @@ -563,7 +540,8 @@ impl MessageConsumerT for FlowExecutorImpl { .int_err()?; flow.save(flow_event_store.as_ref()).await.int_err()?; - let enqueue_helper = target_catalog.get_one::().unwrap(); + let scheduling_helper = + target_catalog.get_one::().unwrap(); let finish_time = self.executor_config.round_time(message.event_time)?; @@ -575,18 +553,18 @@ impl MessageConsumerT for FlowExecutorImpl { match flow.flow_key.get_type().success_followup_method() { FlowSuccessFollowupMethod::Ignore => {} FlowSuccessFollowupMethod::TriggerDependent => { - enqueue_helper - .enqueue_dependent_flows(finish_time, &flow, flow_result) + scheduling_helper + .schedule_dependent_flows(finish_time, &flow, flow_result) .await?; } } } // In case of success: - // - enqueue next auto-polling flow cycle + // - schedule next auto-polling flow cycle if message.outcome.is_success() { - enqueue_helper - .try_enqueue_scheduled_auto_polling_flow_if_enabled( + scheduling_helper + .try_schedule_auto_polling_flow_if_enabled( finish_time, &flow.flow_key, ) @@ -654,8 +632,8 @@ impl MessageConsumerT for FlowExecutorImpl { abort_helper.abort_flow(flow_id).await?; } } else { - let enqueue_helper = target_catalog.get_one::().unwrap(); - enqueue_helper + let scheduling_helper = target_catalog.get_one::().unwrap(); + scheduling_helper .activate_flow_configuration( self.executor_config.round_time(message.event_time)?, message.flow_key.clone(), diff --git a/src/domain/flow-system/services/src/flow/flow_query_service_impl.rs b/src/domain/flow-system/services/src/flow/flow_query_service_impl.rs index 899838fa11..20b0219667 100644 --- a/src/domain/flow-system/services/src/flow/flow_query_service_impl.rs +++ b/src/domain/flow-system/services/src/flow/flow_query_service_impl.rs @@ -20,7 +20,7 @@ use kamu_flow_system::*; use opendatafabric::{AccountID, DatasetID}; use super::FlowTriggerContext; -use crate::{FlowAbortHelper, FlowEnqueueHelper}; +use crate::{FlowAbortHelper, FlowSchedulingHelper}; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -285,8 +285,8 @@ impl FlowQueryService for FlowQueryServiceImpl { ) -> Result { let activation_time = self.executor_config.round_time(trigger_time)?; - let enqueue_helper = self.catalog.get_one::().unwrap(); - enqueue_helper + let scheduling_helper = self.catalog.get_one::().unwrap(); + scheduling_helper .trigger_flow_common( &flow_key, FlowTrigger::Manual(FlowTriggerManual { diff --git a/src/domain/flow-system/services/src/flow/flow_enqueue_helper.rs b/src/domain/flow-system/services/src/flow/flow_scheduling_helper.rs similarity index 92% rename from src/domain/flow-system/services/src/flow/flow_enqueue_helper.rs rename to src/domain/flow-system/services/src/flow/flow_scheduling_helper.rs index 059a4e64eb..dcbc7c77a7 100644 --- a/src/domain/flow-system/services/src/flow/flow_enqueue_helper.rs +++ b/src/domain/flow-system/services/src/flow/flow_scheduling_helper.rs @@ -22,8 +22,7 @@ use crate::{DownstreamDependencyTriggerType, MESSAGE_PRODUCER_KAMU_FLOW_PROGRESS //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -pub(crate) struct FlowEnqueueHelper { - flow_timewheel_service: Arc, +pub(crate) struct 
FlowSchedulingHelper { flow_event_store: Arc, flow_configuration_service: Arc, outbox: Arc, @@ -35,9 +34,8 @@ pub(crate) struct FlowEnqueueHelper { } #[component(pub)] -impl FlowEnqueueHelper { +impl FlowSchedulingHelper { pub(crate) fn new( - flow_timewheel_service: Arc, flow_event_store: Arc, flow_configuration_service: Arc, outbox: Arc, @@ -48,7 +46,6 @@ impl FlowEnqueueHelper { executor_config: Arc, ) -> Self { Self { - flow_timewheel_service, flow_event_store, flow_configuration_service, outbox, @@ -72,11 +69,11 @@ impl FlowEnqueueHelper { FlowKey::Dataset(_) => { match &rule { FlowConfigurationRule::TransformRule(_) => { - self.enqueue_auto_polling_flow_unconditionally(start_time, &flow_key) + self.schedule_auto_polling_flow_unconditionally(start_time, &flow_key) .await?; } FlowConfigurationRule::IngestRule(ingest_rule) => { - self.enqueue_scheduled_auto_polling_flow( + self.schedule_auto_polling_flow( start_time, &flow_key, &ingest_rule.schedule_condition, @@ -93,7 +90,7 @@ impl FlowEnqueueHelper { } FlowKey::System(_) => { if let FlowConfigurationRule::Schedule(schedule) = &rule { - self.enqueue_scheduled_auto_polling_flow(start_time, &flow_key, schedule) + self.schedule_auto_polling_flow(start_time, &flow_key, schedule) .await?; } else { unimplemented!( @@ -106,7 +103,7 @@ impl FlowEnqueueHelper { Ok(()) } - pub(crate) async fn try_enqueue_scheduled_auto_polling_flow_if_enabled( + pub(crate) async fn try_schedule_auto_polling_flow_if_enabled( &self, start_time: DateTime, flow_key: &FlowKey, @@ -118,7 +115,7 @@ impl FlowEnqueueHelper { .int_err()?; if let Some(active_schedule) = maybe_active_schedule { - self.enqueue_scheduled_auto_polling_flow(start_time, flow_key, &active_schedule) + self.schedule_auto_polling_flow(start_time, flow_key, &active_schedule) .await?; } @@ -126,7 +123,7 @@ impl FlowEnqueueHelper { } #[tracing::instrument(level = "trace", skip_all, fields(?flow.flow_key, %flow.flow_id))] - pub(crate) async fn enqueue_dependent_flows( + pub(crate) async fn schedule_dependent_flows( &self, input_success_time: DateTime, flow: &Flow, @@ -284,7 +281,7 @@ impl FlowEnqueueHelper { } } - pub(crate) async fn enqueue_scheduled_auto_polling_flow( + pub(crate) async fn schedule_auto_polling_flow( &self, start_time: DateTime, flow_key: &FlowKey, @@ -303,7 +300,7 @@ impl FlowEnqueueHelper { .await } - pub(crate) async fn enqueue_auto_polling_flow_unconditionally( + pub(crate) async fn schedule_auto_polling_flow_unconditionally( &self, start_time: DateTime, flow_key: &FlowKey, @@ -372,19 +369,14 @@ impl FlowEnqueueHelper { } FlowTriggerContext::Scheduled(_) | FlowTriggerContext::Unconditional => { // Evaluate throttling condition: is new time earlier than planned? 
- let maybe_planned_time = - self.find_planned_flow_activation_time(flow.flow_id); - // In case of batching condition and manual trigger, // there is no planned time, but otherwise compare - if maybe_planned_time.is_none() - || maybe_planned_time + if flow.timing.scheduled_for_activation_at.is_none() + || flow + .timing + .scheduled_for_activation_at .is_some_and(|planned_time| throttling_boundary_time < planned_time) { - // If so, enqueue the flow earlier - self.enqueue_flow(flow.flow_id, throttling_boundary_time) - .await?; - // Indicate throttling, if applied if throttling_boundary_time > trigger_time { self.indicate_throttling_activity( @@ -393,6 +385,10 @@ impl FlowEnqueueHelper { trigger_time, )?; } + + // Schedule the flow earlier than previously planned + self.schedule_flow_for_activation(&mut flow, throttling_boundary_time) + .await?; } } } @@ -401,7 +397,7 @@ impl FlowEnqueueHelper { Ok(flow.into()) } - // Otherwise, initiate a new flow, and enqueue it in the time wheel + // Otherwise, initiate a new flow and schedule it for activation None => { // Initiate new flow let config_snapshot_maybe = if config_snapshot_maybe.is_some() { @@ -442,8 +438,6 @@ impl FlowEnqueueHelper { // Apply throttling boundary let next_activation_time = std::cmp::max(throttling_boundary_time, naive_next_activation_time); - self.enqueue_flow(flow.flow_id, next_activation_time) - .await?; // Set throttling activity as start condition if throttling_boundary_time > naive_next_activation_time { @@ -462,13 +456,15 @@ impl FlowEnqueueHelper { ) .int_err()?; } + + // Schedule flow for the decided moment + self.schedule_flow_for_activation(&mut flow, next_activation_time) + .await?; } FlowTriggerContext::Unconditional => { // Apply throttling boundary let next_activation_time = std::cmp::max(throttling_boundary_time, trigger_time); - self.enqueue_flow(flow.flow_id, next_activation_time) - .await?; // Set throttling activity as start condition if throttling_boundary_time > trigger_time { @@ -478,6 +474,10 @@ impl FlowEnqueueHelper { trigger_time, )?; } + + // Schedule flow for the decided moment + self.schedule_flow_for_activation(&mut flow, next_activation_time) + .await?; } } @@ -578,19 +578,6 @@ impl FlowEnqueueHelper { batching_deadline }; - // Throttling boundary correction - let corrected_finish_time = - std::cmp::max(batching_finish_time, throttling_boundary_time); - - let should_activate = match self.find_planned_flow_activation_time(flow.flow_id) { - Some(activation_time) => activation_time > corrected_finish_time, - None => true, - }; - if should_activate { - self.enqueue_flow(flow.flow_id, corrected_finish_time) - .await?; - } - // If batching is over, it's start condition is no longer valid. 
// However, set throttling condition, if it applies if (satisfied || is_compacted) && throttling_boundary_time > batching_finish_time { @@ -600,6 +587,21 @@ impl FlowEnqueueHelper { batching_finish_time, )?; } + + // Throttling boundary correction + let corrected_finish_time = + std::cmp::max(batching_finish_time, throttling_boundary_time); + + let should_activate = match flow.timing.scheduled_for_activation_at { + Some(scheduled_for_activation_at) => { + scheduled_for_activation_at > corrected_finish_time + } + None => true, + }; + if should_activate { + self.schedule_flow_for_activation(flow, corrected_finish_time) + .await?; + } } Ok(()) @@ -627,12 +629,6 @@ impl FlowEnqueueHelper { self.flow_event_store.try_get_pending_flow(flow_key).await } - #[inline] - fn find_planned_flow_activation_time(&self, flow_id: FlowID) -> Option> { - self.flow_timewheel_service - .get_planned_flow_activation_time(flow_id) - } - async fn make_new_flow( &self, flow_event_store: &dyn FlowEventStore, @@ -668,15 +664,18 @@ impl FlowEnqueueHelper { } } - async fn enqueue_flow( + async fn schedule_flow_for_activation( &self, - flow_id: FlowID, + flow: &mut Flow, activate_at: DateTime, ) -> Result<(), InternalError> { + flow.schedule_for_activation(self.time_source.now(), activate_at) + .int_err()?; + self.outbox .post_message( MESSAGE_PRODUCER_KAMU_FLOW_PROGRESS_SERVICE, - FlowProgressMessage::enqueued(self.time_source.now(), flow_id, activate_at), + FlowProgressMessage::scheduled(self.time_source.now(), flow.flow_id, activate_at), ) .await } diff --git a/src/domain/flow-system/services/src/flow/flow_time_wheel_service_impl.rs b/src/domain/flow-system/services/src/flow/flow_time_wheel_service_impl.rs deleted file mode 100644 index 6e9b993e02..0000000000 --- a/src/domain/flow-system/services/src/flow/flow_time_wheel_service_impl.rs +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright Kamu Data, Inc. and contributors. All rights reserved. -// -// Use of this software is governed by the Business Source License -// included in the LICENSE file. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0. 
- -use std::cmp::Reverse; -use std::collections::{BinaryHeap, HashMap}; -use std::sync::{Arc, Mutex}; - -use chrono::{DateTime, Utc}; -use dill::{component, interface, meta, scope, Catalog, Singleton}; -use internal_error::{InternalError, ResultIntoInternal}; -use kamu_flow_system::{ - FlowID, - FlowProgressMessage, - FlowTimeWheelService, - TimeWheelCancelActivationError, - TimeWheelFlowNotPlannedError, -}; -use messaging_outbox::{ - MessageConsumer, - MessageConsumerMeta, - MessageConsumerT, - MessageConsumptionDurability, -}; - -use crate::{ - MESSAGE_CONSUMER_KAMU_FLOW_TIME_WHEEL_SERVICE, - MESSAGE_PRODUCER_KAMU_FLOW_PROGRESS_SERVICE, -}; - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -pub struct FlowTimeWheelServiceImpl { - state: Arc>, -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#[derive(Default)] -struct State { - flow_heap: BinaryHeap>, - flow_activation_times_by_id: HashMap>, -} - -impl State { - fn is_flow_activation_planned_at( - &self, - flow_id: FlowID, - activation_moment: DateTime, - ) -> bool { - self.flow_activation_times_by_id - .get(&flow_id) - .is_some_and(|flow_activation_time| *flow_activation_time == activation_moment) - } - - fn clean_top_cancellations(&mut self) { - while let Some(ar) = self.flow_heap.peek() { - if self.is_flow_activation_planned_at(ar.0.flow_id, ar.0.activation_time) { - break; - } - - self.flow_heap.pop(); - } - } - - fn plan_flow(&mut self, flow_record: FlowRecord) { - self.flow_activation_times_by_id - .insert(flow_record.flow_id, flow_record.activation_time); - - self.flow_heap.push(Reverse(flow_record)); - } - - fn unplan_flow(&mut self, flow_id: FlowID) { - self.flow_activation_times_by_id.remove(&flow_id); - self.clean_top_cancellations(); - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// TODO: assign a score, and use it as an ordering criteria for the tasks within -// the same activation time -#[derive(PartialEq, Eq, PartialOrd, Ord)] -struct FlowRecord { - pub activation_time: DateTime, - pub flow_id: FlowID, -} - -impl FlowRecord { - fn new(activation_time: DateTime, flow_id: FlowID) -> Self { - Self { - activation_time, - flow_id, - } - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#[component(pub)] -#[interface(dyn FlowTimeWheelService)] -#[interface(dyn MessageConsumer)] -#[interface(dyn MessageConsumerT)] -#[meta(MessageConsumerMeta { - consumer_name: MESSAGE_CONSUMER_KAMU_FLOW_TIME_WHEEL_SERVICE, - feeding_producers: &[ - MESSAGE_PRODUCER_KAMU_FLOW_PROGRESS_SERVICE - ], - durability: MessageConsumptionDurability::Durable, -})] -#[scope(Singleton)] -impl FlowTimeWheelServiceImpl { - pub fn new() -> Self { - Self { - state: Arc::new(Mutex::new(State::default())), - } - } - - fn activate_at(&self, activation_time: DateTime, flow_id: FlowID) { - let mut guard = self.state.lock().unwrap(); - - match guard.flow_activation_times_by_id.get(&flow_id) { - Some(earlier_activation_time) => { - if activation_time < *earlier_activation_time { - guard.unplan_flow(flow_id); - guard.plan_flow(FlowRecord::new(activation_time, flow_id)); - } - } - None => { - guard.plan_flow(FlowRecord::new(activation_time, flow_id)); - } - } - } - - fn cancel_flow_activation( - &self, - flow_id: FlowID, - ) -> Result<(), 
TimeWheelCancelActivationError> { - let mut guard = self.state.lock().unwrap(); - - if guard.flow_activation_times_by_id.contains_key(&flow_id) { - guard.unplan_flow(flow_id); - Ok(()) - } else { - Err(TimeWheelCancelActivationError::FlowNotPlanned( - TimeWheelFlowNotPlannedError { flow_id }, - )) - } - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -impl FlowTimeWheelService for FlowTimeWheelServiceImpl { - fn nearest_activation_moment(&self) -> Option> { - let guard = self.state.lock().unwrap(); - guard.flow_heap.peek().map(|ar| ar.0.activation_time) - } - - fn take_nearest_planned_flows(&self) -> Vec { - let mut guard = self.state.lock().unwrap(); - - if guard.flow_heap.is_empty() { - vec![] - } else { - let activation_moment = guard.flow_heap.peek().unwrap().0.activation_time; - - let mut res: Vec<_> = Vec::new(); - while let Some(ar) = guard.flow_heap.peek() { - if ar.0.activation_time > activation_moment { - break; - } - - if guard.is_flow_activation_planned_at(ar.0.flow_id, activation_moment) { - res.push(ar.0.flow_id); - } - - guard.flow_heap.pop(); - } - - guard.clean_top_cancellations(); - - res - } - } - - fn get_planned_flow_activation_time(&self, flow_id: FlowID) -> Option> { - let guard = self.state.lock().unwrap(); - guard.flow_activation_times_by_id.get(&flow_id).copied() - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -impl MessageConsumer for FlowTimeWheelServiceImpl {} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#[async_trait::async_trait] -impl MessageConsumerT for FlowTimeWheelServiceImpl { - #[tracing::instrument( - level = "debug", - skip_all, - name = "FlowTimeWheelServiceImpl[FlowProgressMessage]" - )] - async fn consume_message( - &self, - _: &Catalog, - message: &FlowProgressMessage, - ) -> Result<(), InternalError> { - tracing::debug!(received_message = ?message, "Received flow progress message"); - - match message { - FlowProgressMessage::Enqueued(e) => { - self.activate_at(e.activate_at, e.flow_id); - Ok(()) - } - FlowProgressMessage::Cancelled(e) => { - self.cancel_flow_activation(e.flow_id).int_err()?; - Ok(()) - } - FlowProgressMessage::Running(_) | FlowProgressMessage::Finished(_) => Ok(()), - } - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -#[cfg(test)] -mod tests { - use chrono::Duration; - - use super::*; - - const FLOW_ID_1: u64 = 115; - const FLOW_ID_2: u64 = 116; - const FLOW_ID_3: u64 = 117; - const FLOW_ID_4: u64 = 118; - const FLOW_ID_5: u64 = 119; - - #[test] - fn test_sequential_scheduling() { - let timewheel = FlowTimeWheelServiceImpl::new(); - assert!(timewheel.nearest_activation_moment().is_none()); - - let now = Utc::now(); - let moment_1 = now + Duration::try_seconds(10).unwrap(); - let moment_2 = now + Duration::try_seconds(20).unwrap(); - let moment_3 = now + Duration::try_seconds(30).unwrap(); - - schedule_flow(&timewheel, moment_1, FLOW_ID_1); - schedule_flow(&timewheel, moment_1, FLOW_ID_2); - schedule_flow(&timewheel, moment_2, FLOW_ID_3); - schedule_flow(&timewheel, moment_3, FLOW_ID_4); - schedule_flow(&timewheel, moment_3, FLOW_ID_5); - - check_next_time_slot(&timewheel, moment_1, &[FLOW_ID_1, FLOW_ID_2]); - check_next_time_slot(&timewheel, moment_2, &[FLOW_ID_3]); - check_next_time_slot(&timewheel, 
moment_3, &[FLOW_ID_4, FLOW_ID_5]); - } - - #[test] - fn test_random_order_scheduling() { - let timewheel = FlowTimeWheelServiceImpl::new(); - assert!(timewheel.nearest_activation_moment().is_none()); - - let now = Utc::now(); - let moment_1 = now + Duration::try_seconds(10).unwrap(); - let moment_2 = now + Duration::try_seconds(20).unwrap(); - let moment_3 = now + Duration::try_seconds(30).unwrap(); - - schedule_flow(&timewheel, moment_2, FLOW_ID_3); - schedule_flow(&timewheel, moment_3, FLOW_ID_5); - schedule_flow(&timewheel, moment_1, FLOW_ID_1); - schedule_flow(&timewheel, moment_3, FLOW_ID_4); - schedule_flow(&timewheel, moment_1, FLOW_ID_2); - - check_next_time_slot(&timewheel, moment_1, &[FLOW_ID_1, FLOW_ID_2]); - check_next_time_slot(&timewheel, moment_2, &[FLOW_ID_3]); - check_next_time_slot(&timewheel, moment_3, &[FLOW_ID_4, FLOW_ID_5]); - } - - #[test] - fn test_cancellations() { - let timewheel = FlowTimeWheelServiceImpl::new(); - assert!(timewheel.nearest_activation_moment().is_none()); - - let now = Utc::now(); - let moment_1 = now + Duration::try_seconds(10).unwrap(); - let moment_2 = now + Duration::try_seconds(20).unwrap(); - let moment_3 = now + Duration::try_seconds(30).unwrap(); - - schedule_flow(&timewheel, moment_1, FLOW_ID_1); - schedule_flow(&timewheel, moment_1, FLOW_ID_2); - schedule_flow(&timewheel, moment_2, FLOW_ID_3); - schedule_flow(&timewheel, moment_3, FLOW_ID_4); - schedule_flow(&timewheel, moment_3, FLOW_ID_5); - - timewheel - .cancel_flow_activation(FlowID::new(FLOW_ID_1)) - .unwrap(); - timewheel - .cancel_flow_activation(FlowID::new(FLOW_ID_3)) - .unwrap(); - timewheel - .cancel_flow_activation(FlowID::new(FLOW_ID_5)) - .unwrap(); - - check_next_time_slot(&timewheel, moment_1, &[FLOW_ID_2]); - check_next_time_slot(&timewheel, moment_3, &[FLOW_ID_4]); - assert!(timewheel.nearest_activation_moment().is_none()); - } - - fn schedule_flow(timewheel: &FlowTimeWheelServiceImpl, moment: DateTime, flow_id: u64) { - timewheel.activate_at(moment, FlowID::new(flow_id)); - } - - fn check_next_time_slot( - timewheel: &dyn FlowTimeWheelService, - moment: DateTime, - flow_ids: &[u64], - ) { - assert_eq!(timewheel.nearest_activation_moment().unwrap(), moment); - assert_eq!( - timewheel.take_nearest_planned_flows(), - flow_ids - .iter() - .map(|id| FlowID::new(*id)) - .collect::>() - ); - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/domain/flow-system/services/src/flow/mod.rs b/src/domain/flow-system/services/src/flow/mod.rs index 654600ec54..adff1a19c6 100644 --- a/src/domain/flow-system/services/src/flow/mod.rs +++ b/src/domain/flow-system/services/src/flow/mod.rs @@ -8,13 +8,11 @@ // by the Apache License, Version 2.0. 
mod flow_abort_helper; -mod flow_enqueue_helper; mod flow_executor_impl; mod flow_query_service_impl; -mod flow_time_wheel_service_impl; +mod flow_scheduling_helper; pub(crate) use flow_abort_helper::*; -pub(crate) use flow_enqueue_helper::*; pub use flow_executor_impl::*; pub use flow_query_service_impl::*; -pub use flow_time_wheel_service_impl::*; +pub(crate) use flow_scheduling_helper::*; diff --git a/src/domain/flow-system/services/src/messages/flow_message_consumers.rs b/src/domain/flow-system/services/src/messages/flow_message_consumers.rs index f8930c2300..64eb11480d 100644 --- a/src/domain/flow-system/services/src/messages/flow_message_consumers.rs +++ b/src/domain/flow-system/services/src/messages/flow_message_consumers.rs @@ -14,7 +14,4 @@ pub const MESSAGE_CONSUMER_KAMU_FLOW_CONFIGURATION_SERVICE: &str = pub const MESSAGE_CONSUMER_KAMU_FLOW_EXECUTOR: &str = "dev.kamu.domain.flow-system.FlowExecutor"; -pub const MESSAGE_CONSUMER_KAMU_FLOW_TIME_WHEEL_SERVICE: &str = - "dev.kamu.domain.flow-system.FlowTimeWheelService"; - //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/domain/flow-system/services/tests/tests/test_flow_configuration_service_impl.rs b/src/domain/flow-system/services/tests/tests/test_flow_configuration_service_impl.rs index dea281940b..4b2a332f55 100644 --- a/src/domain/flow-system/services/tests/tests/test_flow_configuration_service_impl.rs +++ b/src/domain/flow-system/services/tests/tests/test_flow_configuration_service_impl.rs @@ -35,7 +35,7 @@ async fn test_visibility() { let harness = FlowConfigurationHarness::new(); assert!(harness.list_enabled_configurations().await.is_empty()); - let gc_schedule: Schedule = Duration::try_minutes(30).unwrap().into(); + let gc_schedule: Schedule = Duration::minutes(30).into(); harness .set_system_flow_schedule(SystemFlowType::GC, gc_schedule.clone()) .await @@ -44,7 +44,7 @@ async fn test_visibility() { let foo_id = harness.create_root_dataset("foo").await; let bar_id = harness.create_root_dataset("bar").await; - let foo_ingest_schedule: Schedule = Duration::try_days(1).unwrap().into(); + let foo_ingest_schedule: Schedule = Duration::days(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -53,7 +53,7 @@ async fn test_visibility() { ) .await; - let foo_compaction_schedule: Schedule = Duration::try_weeks(1).unwrap().into(); + let foo_compaction_schedule: Schedule = Duration::weeks(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -62,7 +62,7 @@ async fn test_visibility() { ) .await; - let bar_ingest_schedule: Schedule = Duration::try_hours(3).unwrap().into(); + let bar_ingest_schedule: Schedule = Duration::hours(3).into(); harness .set_dataset_flow_schedule( bar_id.clone(), @@ -103,7 +103,7 @@ async fn test_pause_resume_individual_dataset_flows() { // Make a dataset and configure daily ingestion schedule let foo_id = harness.create_root_dataset("foo").await; - let foo_ingest_schedule: Schedule = Duration::try_days(1).unwrap().into(); + let foo_ingest_schedule: Schedule = Duration::days(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -176,7 +176,7 @@ async fn test_pause_resume_all_dataset_flows() { // Make a dataset and configure ingestion and compaction schedule let foo_id = harness.create_root_dataset("foo").await; - let foo_ingest_schedule: Schedule = Duration::try_days(1).unwrap().into(); + let foo_ingest_schedule: Schedule = Duration::days(1).into(); harness .set_dataset_flow_schedule( 
foo_id.clone(), @@ -184,7 +184,7 @@ async fn test_pause_resume_all_dataset_flows() { foo_ingest_schedule.clone(), ) .await; - let foo_compaction_schedule: Schedule = Duration::try_weeks(1).unwrap().into(); + let foo_compaction_schedule: Schedule = Duration::weeks(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -276,7 +276,7 @@ async fn test_pause_resume_individual_system_flows() { assert_eq!(0, harness.configuration_events_count()); // Configure GC schedule - let gc_schedule: Schedule = Duration::try_minutes(30).unwrap().into(); + let gc_schedule: Schedule = Duration::minutes(30).into(); harness .set_system_flow_schedule(SystemFlowType::GC, gc_schedule.clone()) .await @@ -330,7 +330,7 @@ async fn test_modify() { // Make a dataset and configure daily ingestion schedule let foo_id = harness.create_root_dataset("foo").await; - let foo_ingest_schedule: Schedule = Duration::try_days(1).unwrap().into(); + let foo_ingest_schedule: Schedule = Duration::days(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -351,7 +351,7 @@ async fn test_modify() { assert_eq!(1, harness.configuration_events_count()); // Now make the schedule weekly - let foo_ingest_schedule_2: Schedule = Duration::try_weeks(1).unwrap().into(); + let foo_ingest_schedule_2: Schedule = Duration::weeks(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), @@ -381,7 +381,7 @@ async fn test_dataset_deleted() { // Make a dataset and configure daily ingestion schedule let foo_id = harness.create_root_dataset("foo").await; - let foo_ingest_schedule: Schedule = Duration::try_days(1).unwrap().into(); + let foo_ingest_schedule: Schedule = Duration::days(1).into(); harness .set_dataset_flow_schedule( foo_id.clone(), diff --git a/src/domain/flow-system/services/tests/tests/test_flow_executor_impl.rs b/src/domain/flow-system/services/tests/tests/test_flow_executor_impl.rs index 8e1cd3654a..a5ca4a730b 100644 --- a/src/domain/flow-system/services/tests/tests/test_flow_executor_impl.rs +++ b/src/domain/flow-system/services/tests/tests/test_flow_executor_impl.rs @@ -49,7 +49,7 @@ async fn test_read_initial_config_and_queue_without_waiting() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(60).unwrap().into(), + schedule_condition: Duration::milliseconds(60).into(), }, ) .await; @@ -58,7 +58,7 @@ async fn test_read_initial_config_and_queue_without_waiting() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -74,8 +74,8 @@ async fn test_read_initial_config_and_queue_without_waiting() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -88,8 +88,8 @@ async fn test_read_initial_config_and_queue_without_waiting() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), 
dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(90).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(90), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -99,11 +99,11 @@ async fn test_read_initial_config_and_queue_without_waiting() { // Main simulation boundary - 120ms total // - "foo" should immediately schedule "task 0", since "foo" has never run yet - // - "task 0" will take action and complete, this will enqueue the next flow - // run for "foo" after full scheduling period + // - "task 0" will take action and complete, this will schedule the next flow + // run for "foo" after full period // - when that period is over, "task 1" should be scheduled // - "task 1" will take action and complete, enqueuing another flow - let sim_handle = harness.advance_time(Duration::try_milliseconds(120).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(120)); tokio::join!(foo_task0_handle, foo_task1_handle, sim_handle) } => Ok(()) } @@ -173,7 +173,7 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Configure ingestion schedule every 60ms, but use event store directly @@ -188,7 +188,7 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { paused: false, rule: FlowConfigurationRule::IngestRule(IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(60).unwrap().into(), + schedule_condition: Duration::milliseconds(60).into(), }), } .into()], @@ -221,11 +221,17 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { event_time: Utc::now(), flow_id, start_condition: FlowStartCondition::Schedule(FlowStartConditionSchedule { - wake_up_at: start_time + Duration::try_milliseconds(100).unwrap(), + wake_up_at: start_time + Duration::milliseconds(100), }), last_trigger_index: 0, } .into(), + FlowEventScheduledForActivation { + event_time: Utc::now(), + flow_id, + scheduled_for_activation_at: start_time + Duration::milliseconds(100), + } + .into(), ], ) .await @@ -244,8 +250,8 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(110), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -254,7 +260,7 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { let foo_task0_handle = foo_task0_driver.run(); // Main simulation boundary - 130ms total - let sim_handle = harness.advance_time(Duration::try_milliseconds(130).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(130)); tokio::join!(foo_task0_handle, sim_handle) } => Ok(()) 
} @@ -295,8 +301,8 @@ async fn test_read_initial_config_shouldnt_queue_in_recovery_case() { async fn test_cron_config() { // Note: this test runs with 1s step, CRON does not apply to milliseconds let harness = FlowHarness::with_overrides(FlowHarnessOverrides { - awaiting_step: Some(Duration::try_seconds(1).unwrap()), - mandatory_throttling_period: Some(Duration::try_seconds(1).unwrap()), + awaiting_step: Some(Duration::seconds(1)), + mandatory_throttling_period: Some(Duration::seconds(1)), ..Default::default() }) .await; @@ -315,7 +321,7 @@ async fn test_cron_config() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_seconds(1).unwrap()) + .duration_round(Duration::seconds(1)) .unwrap(); let test_flow_listener = harness.catalog.get_one::().unwrap(); @@ -334,8 +340,8 @@ async fn test_cron_config() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_seconds(6).unwrap(), - finish_in_with: Some((Duration::try_seconds(1).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::seconds(6), + finish_in_with: Some((Duration::seconds(1), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -346,7 +352,7 @@ async fn test_cron_config() { // Main simulation script let main_handle = async { // Wait 2 s - harness.advance_time_custom_alignment(Duration::try_seconds(1).unwrap(), Duration::try_seconds(2).unwrap()).await; + harness.advance_time_custom_alignment(Duration::seconds(1), Duration::seconds(2)).await; // Enable CRON config (we are skipping moment 0s) harness @@ -364,11 +370,11 @@ async fn test_cron_config() { ) .await; test_flow_listener - .make_a_snapshot(start_time + Duration::try_seconds(1).unwrap()) + .make_a_snapshot(start_time + Duration::seconds(1)) .await; // Main simulation boundary - 12s total: at 10s 2nd scheduling happens; - harness.advance_time_custom_alignment(Duration::try_seconds(1).unwrap(), Duration::try_seconds(11).unwrap()).await; + harness.advance_time_custom_alignment(Duration::seconds(1), Duration::seconds(11)).await; }; tokio::join!(foo_task0_handle, main_handle) @@ -439,7 +445,7 @@ async fn test_manual_trigger() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(90).unwrap().into(), + schedule_condition: Duration::milliseconds(90).into(), }, ) .await; @@ -455,7 +461,7 @@ async fn test_manual_trigger() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -471,8 +477,8 @@ async fn test_manual_trigger() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -485,8 +491,8 @@ async fn 
test_manual_trigger() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(60).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(60), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -499,8 +505,8 @@ async fn test_manual_trigger() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(100).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(100), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -511,7 +517,7 @@ async fn test_manual_trigger() { // Manual trigger for "foo" at 40ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(40).unwrap(), + run_since_start: Duration::milliseconds(40), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -519,7 +525,7 @@ async fn test_manual_trigger() { // Manual trigger for "bar" at 80ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: bar_flow_key, - run_since_start: Duration::try_milliseconds(80).unwrap(), + run_since_start: Duration::milliseconds(80), initiator_id: None, }); let trigger1_handle = trigger1_driver.run(); @@ -529,7 +535,7 @@ async fn test_manual_trigger() { // "foo": // - flow 0 => task 0 gets scheduled immediately at 0ms // - flow 0 => task 0 starts at 10ms and finishes running at 20ms - // - next flow => enqueued at 20ms to trigger in 1 period of 90ms - at 110ms + // - next flow => scheduled at 20ms to trigger in 1 period of 90ms - at 110ms // "bar": silent // Moment 40ms - manual foo trigger happens here: @@ -542,10 +548,10 @@ async fn test_manual_trigger() { // - flow 2 immediately scheduled // - task 2 gets scheduled at 80ms // - task 2 starts at 100ms and finishes at 110ms (ensure gap to fight against task execution order) - // - no next flow enqueued + // - no next flow scheduled // Stop at 180ms: "foo" flow 3 gets scheduled at 160ms - harness.advance_time(Duration::try_milliseconds(180).unwrap()).await; + harness.advance_time(Duration::milliseconds(180)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, trigger0_handle, trigger1_handle, main_handle) @@ -657,7 +663,7 @@ async fn test_ingest_trigger_with_ingest_config() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: true, - schedule_condition: Duration::try_milliseconds(90).unwrap().into(), + schedule_condition: Duration::milliseconds(90).into(), }, ) .await; @@ -673,7 +679,7 @@ async fn test_ingest_trigger_with_ingest_config() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ 
-689,8 +695,8 @@ async fn test_ingest_trigger_with_ingest_config() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: true @@ -703,8 +709,8 @@ async fn test_ingest_trigger_with_ingest_config() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(60).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(60), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: true @@ -717,8 +723,8 @@ async fn test_ingest_trigger_with_ingest_config() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(100).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(100), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -729,7 +735,7 @@ async fn test_ingest_trigger_with_ingest_config() { // Manual trigger for "foo" at 40ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(40).unwrap(), + run_since_start: Duration::milliseconds(40), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -737,7 +743,7 @@ async fn test_ingest_trigger_with_ingest_config() { // Manual trigger for "bar" at 80ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: bar_flow_key, - run_since_start: Duration::try_milliseconds(80).unwrap(), + run_since_start: Duration::milliseconds(80), initiator_id: None, }); let trigger1_handle = trigger1_driver.run(); @@ -747,7 +753,7 @@ async fn test_ingest_trigger_with_ingest_config() { // "foo": // - flow 0 => task 0 gets scheduled immediately at 0ms // - flow 0 => task 0 starts at 10ms and finishes running at 20ms - // - next flow => enqueued at 20ms to trigger in 1 period of 90ms - at 110ms + // - next flow => scheduled at 20ms to trigger in 1 period of 90ms - at 110ms // "bar": silent // Moment 40ms - manual foo trigger happens here: @@ -760,10 +766,10 @@ async fn test_ingest_trigger_with_ingest_config() { // - flow 2 immediately scheduled // - task 2 gets scheduled at 80ms // - task 2 starts at 100ms and finishes at 110ms (ensure gap to fight against task execution order) - // - no next flow enqueued + // - no next flow scheduled // Stop at 180ms: "foo" flow 3 gets scheduled at 110ms - harness.advance_time(Duration::try_milliseconds(180).unwrap()).await; + 
harness.advance_time(Duration::milliseconds(180)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, trigger0_handle, trigger1_handle, main_handle) @@ -881,7 +887,7 @@ async fn test_manual_trigger_compaction() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -897,8 +903,8 @@ async fn test_manual_trigger_compaction() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: foo_id.clone(), max_slice_size: None, @@ -912,8 +918,8 @@ async fn test_manual_trigger_compaction() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(60).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(60), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: bar_id.clone(), max_slice_size: None, @@ -926,7 +932,7 @@ async fn test_manual_trigger_compaction() { // Manual trigger for "foo" at 10ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -934,7 +940,7 @@ async fn test_manual_trigger_compaction() { // Manual trigger for "bar" at 50ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: bar_flow_key, - run_since_start: Duration::try_milliseconds(50).unwrap(), + run_since_start: Duration::milliseconds(50), initiator_id: None, }); let trigger1_handle = trigger1_driver.run(); @@ -948,7 +954,7 @@ async fn test_manual_trigger_compaction() { // - flow 1 trigger and finishes // - task 1 starts at 60ms, finishes at 70ms (leave some gap to fight with random order) - harness.advance_time(Duration::try_milliseconds(100).unwrap()).await; + harness.advance_time(Duration::milliseconds(100)).await; }; tokio::join!(task0_handle, task1_handle, trigger0_handle, trigger1_handle, main_handle) @@ -1049,7 +1055,7 @@ async fn test_manual_trigger_reset() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -1065,8 +1071,8 @@ async fn test_manual_trigger_reset() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(create_dataset_result.dataset_handle.id.clone()), - run_since_start: 
Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(90).unwrap(), TaskOutcome::Success(TaskResult::ResetDatasetResult(TaskResetDatasetResult { new_head: Multihash::from_digest_sha3_256(b"new-head") })))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(90), TaskOutcome::Success(TaskResult::ResetDatasetResult(TaskResetDatasetResult { new_head: Multihash::from_digest_sha3_256(b"new-head") })))), expected_logical_plan: LogicalPlan::Reset(ResetDataset { dataset_id: create_dataset_result.dataset_handle.id.clone(), // By deafult should reset to seed block @@ -1080,7 +1086,7 @@ async fn test_manual_trigger_reset() { // Manual trigger for "foo" at 10ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -1089,7 +1095,7 @@ async fn test_manual_trigger_reset() { let main_handle = async { // Moment 20ms - manual foo trigger happens here: // - flow 0 gets trigger and finishes at 110ms - harness.advance_time(Duration::try_milliseconds(250).unwrap()).await; + harness.advance_time(Duration::milliseconds(250)).await; }; tokio::join!(task0_handle, trigger0_handle, main_handle) @@ -1191,7 +1197,7 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -1204,7 +1210,7 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { _ = async { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -1214,8 +1220,8 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(create_foo_result.dataset_handle.id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(70).unwrap(), TaskOutcome::Success(TaskResult::ResetDatasetResult(TaskResetDatasetResult { new_head: Multihash::from_digest_sha3_256(b"new-head") })))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(70), TaskOutcome::Success(TaskResult::ResetDatasetResult(TaskResetDatasetResult { new_head: Multihash::from_digest_sha3_256(b"new-head") })))), expected_logical_plan: LogicalPlan::Reset(ResetDataset { dataset_id: create_foo_result.dataset_handle.id.clone(), new_head_hash: Some(dataset_blocks[1].0.clone()), @@ -1230,10 +1236,10 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_baz_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), + run_since_start: Duration::milliseconds(110), finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), 
TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-2"), @@ -1258,10 +1264,10 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_bar_id.clone()), - run_since_start: Duration::try_milliseconds(200).unwrap(), + run_since_start: Duration::milliseconds(200), finish_in_with: Some( ( - Duration::try_milliseconds(40).unwrap(), + Duration::milliseconds(40), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-3"), @@ -1283,7 +1289,7 @@ async fn test_reset_trigger_keep_metadata_compaction_for_derivatives() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(300).unwrap()).await; + harness.advance_time(Duration::milliseconds(300)).await; }; // tokio::join!(trigger0_handle, task0_handle, main_handle) @@ -1396,7 +1402,7 @@ async fn test_manual_trigger_compaction_with_config() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -1412,8 +1418,8 @@ async fn test_manual_trigger_compaction_with_config() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(30).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(30), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: foo_id.clone(), max_slice_size: Some(max_slice_size), @@ -1425,7 +1431,7 @@ async fn test_manual_trigger_compaction_with_config() { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -1434,7 +1440,7 @@ async fn test_manual_trigger_compaction_with_config() { let main_handle = async { // Moment 30ms - manual foo trigger happens here: // - flow 0 trigger and finishes at 40ms - harness.advance_time(Duration::try_milliseconds(80).unwrap()).await; + harness.advance_time(Duration::milliseconds(80)).await; }; tokio::join!(task0_handle, trigger0_handle, main_handle) @@ -1524,7 +1530,7 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -1537,7 +1543,7 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati _ = async { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: 
Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -1547,10 +1553,10 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice"), @@ -1575,10 +1581,10 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_baz_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), + run_since_start: Duration::milliseconds(110), finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-2"), @@ -1603,10 +1609,10 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_bar_id.clone()), - run_since_start: Duration::try_milliseconds(200).unwrap(), + run_since_start: Duration::milliseconds(200), finish_in_with: Some( ( - Duration::try_milliseconds(40).unwrap(), + Duration::milliseconds(40), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-3"), @@ -1628,7 +1634,7 @@ async fn test_full_hard_compaction_trigger_keep_metadata_compaction_for_derivati // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(300).unwrap()).await; + harness.advance_time(Duration::milliseconds(300)).await; }; tokio::join!(trigger0_handle, task0_handle, task1_handle, task2_handle, main_handle) @@ -1759,7 +1765,7 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -1772,7 +1778,7 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { _ = async { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -1782,10 +1788,10 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), 
finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice"), @@ -1810,10 +1816,10 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_bar_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), + run_since_start: Duration::milliseconds(110), finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-2"), @@ -1838,10 +1844,10 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_bar_baz_id.clone()), - run_since_start: Duration::try_milliseconds(200).unwrap(), + run_since_start: Duration::milliseconds(200), finish_in_with: Some( ( - Duration::try_milliseconds(40).unwrap(), + Duration::milliseconds(40), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice-3"), @@ -1863,7 +1869,7 @@ async fn test_manual_trigger_keep_metadata_only_with_recursive_compaction() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(300).unwrap()).await; + harness.advance_time(Duration::milliseconds(300)).await; }; tokio::join!(trigger0_handle, task0_handle, task1_handle, task2_handle, main_handle) @@ -1996,7 +2002,7 @@ async fn test_manual_trigger_keep_metadata_only_without_recursive_compaction() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2009,7 +2015,7 @@ async fn test_manual_trigger_keep_metadata_only_without_recursive_compaction() { _ = async { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -2019,10 +2025,10 @@ async fn test_manual_trigger_keep_metadata_only_without_recursive_compaction() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), finish_in_with: Some( ( - Duration::try_milliseconds(70).unwrap(), + Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice"), @@ -2044,7 +2050,7 @@ async fn test_manual_trigger_keep_metadata_only_without_recursive_compaction() { // Main simulation script let main_handle = async { - 
harness.advance_time(Duration::try_milliseconds(150).unwrap()).await; + harness.advance_time(Duration::milliseconds(150)).await; }; tokio::join!(trigger0_handle, task0_handle, main_handle) @@ -2138,7 +2144,7 @@ async fn test_manual_trigger_keep_metadata_only_compaction_multiple_accounts() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2154,8 +2160,8 @@ async fn test_manual_trigger_keep_metadata_only_compaction_multiple_accounts() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(70).unwrap(), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice"), new_head: Multihash::from_digest_sha3_256(b"new-slice"), @@ -2173,7 +2179,7 @@ async fn test_manual_trigger_keep_metadata_only_compaction_multiple_accounts() { let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -2183,9 +2189,9 @@ async fn test_manual_trigger_keep_metadata_only_compaction_multiple_accounts() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_bar_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), + run_since_start: Duration::milliseconds(110), // Send some PullResult with records to bypass batching condition - finish_in_with: Some((Duration::try_milliseconds(70).unwrap(), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { + finish_in_with: Some((Duration::milliseconds(70), TaskOutcome::Success(TaskResult::CompactionDatasetResult(TaskCompactionDatasetResult { compaction_result: CompactionResult::Success { old_head: Multihash::from_digest_sha3_256(b"old-slice"), new_head: Multihash::from_digest_sha3_256(b"new-slice"), @@ -2204,7 +2210,7 @@ async fn test_manual_trigger_keep_metadata_only_compaction_multiple_accounts() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, trigger0_handle, task1_handle, main_handle) @@ -2284,7 +2290,7 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -2295,7 +2301,7 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(80).unwrap().into(), + schedule_condition: Duration::milliseconds(80).into(), }, ) 
.await; @@ -2308,7 +2314,7 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2324,8 +2330,8 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -2338,8 +2344,8 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -2358,9 +2364,9 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { // - next flow 3 queued for 70ms (20+50) // 50ms: Pause both flow configs in between completion 2 first tasks and queuing - harness.advance_time(Duration::try_milliseconds(50).unwrap()).await; - harness.pause_dataset_flow(start_time + Duration::try_milliseconds(50).unwrap(), foo_id.clone(), DatasetFlowType::Ingest).await; - harness.pause_dataset_flow(start_time + Duration::try_milliseconds(50).unwrap(), bar_id.clone(), DatasetFlowType::Ingest).await; + harness.advance_time(Duration::milliseconds(50)).await; + harness.pause_dataset_flow(start_time + Duration::milliseconds(50), foo_id.clone(), DatasetFlowType::Ingest).await; + harness.pause_dataset_flow(start_time + Duration::milliseconds(50), bar_id.clone(), DatasetFlowType::Ingest).await; // 80ms: Wake up after initially planned "foo" scheduling but before planned "bar" scheduling: // - "foo": @@ -2369,18 +2375,18 @@ async fn test_dataset_flow_configuration_paused_resumed_modified() { // - "bar": // - gets a config update for period of 70ms // - get queued for 100ms (last success at 30ms + period of 70ms) - harness.advance_time(Duration::try_milliseconds(30).unwrap()).await; - harness.resume_dataset_flow(start_time + Duration::try_milliseconds(80).unwrap(), foo_id.clone(), DatasetFlowType::Ingest).await; - harness.set_dataset_flow_ingest(start_time + Duration::try_milliseconds(80).unwrap(), bar_id.clone(), DatasetFlowType::Ingest, IngestRule { + harness.advance_time(Duration::milliseconds(30)).await; + harness.resume_dataset_flow(start_time + Duration::milliseconds(80), foo_id.clone(), DatasetFlowType::Ingest).await; + harness.set_dataset_flow_ingest(start_time + Duration::milliseconds(80), bar_id.clone(), DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: 
Duration::try_milliseconds(70).unwrap().into(), + schedule_condition: Duration::milliseconds(70).into(), }).await; test_flow_listener - .make_a_snapshot(start_time + Duration::try_milliseconds(80).unwrap()) + .make_a_snapshot(start_time + Duration::milliseconds(80)) .await; // 120ms: finish - harness.advance_time(Duration::try_milliseconds(40).unwrap()).await; + harness.advance_time(Duration::milliseconds(40)).await; }; tokio::join!(task0_handle, task1_handle, main_handle) @@ -2513,7 +2519,7 @@ async fn test_respect_last_success_time_when_schedule_resumes() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(100).unwrap().into(), + schedule_condition: Duration::milliseconds(100).into(), }, ) .await; @@ -2525,7 +2531,7 @@ async fn test_respect_last_success_time_when_schedule_resumes() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(60).unwrap().into(), + schedule_condition: Duration::milliseconds(60).into(), }, ) .await; @@ -2541,7 +2547,7 @@ async fn test_respect_last_success_time_when_schedule_resumes() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2557,8 +2563,8 @@ async fn test_respect_last_success_time_when_schedule_resumes() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -2571,8 +2577,8 @@ async fn test_respect_last_success_time_when_schedule_resumes() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -2591,28 +2597,28 @@ async fn test_respect_last_success_time_when_schedule_resumes() { // - next flow 3 queued for 90ms (30ms initiated + 60ms period) // 50ms: Pause flow config before next flow runs - harness.advance_time(Duration::try_milliseconds(50).unwrap()).await; - harness.pause_dataset_flow(start_time + Duration::try_milliseconds(50).unwrap(), foo_id.clone(), DatasetFlowType::Ingest).await; - harness.pause_dataset_flow(start_time + Duration::try_milliseconds(50).unwrap(), bar_id.clone(), DatasetFlowType::Ingest).await; + harness.advance_time(Duration::milliseconds(50)).await; + harness.pause_dataset_flow(start_time + Duration::milliseconds(50), foo_id.clone(), DatasetFlowType::Ingest).await; + harness.pause_dataset_flow(start_time + Duration::milliseconds(50), bar_id.clone(), 
DatasetFlowType::Ingest).await; // 100ms: Wake up after initially planned "bar" scheduling but before planned "foo" scheduling: // - "foo": // - resumed with period 100ms // - last success at 20ms - // - enqueued for 120ms (still wait a little bit since last success) + // - scheduled for 120ms (still wait a little bit since last success) // - "bar": // - resumed with period 60ms // - last success at 30ms // - gets scheduled immediately (waited longer than 30ms last success + 60ms period) - harness.advance_time(Duration::try_milliseconds(50).unwrap()).await; - harness.resume_dataset_flow(start_time + Duration::try_milliseconds(100).unwrap(), foo_id.clone(), DatasetFlowType::Ingest).await; - harness.resume_dataset_flow(start_time + Duration::try_milliseconds(100).unwrap(), bar_id.clone(), DatasetFlowType::Ingest).await; + harness.advance_time(Duration::milliseconds(50)).await; + harness.resume_dataset_flow(start_time + Duration::milliseconds(100), foo_id.clone(), DatasetFlowType::Ingest).await; + harness.resume_dataset_flow(start_time + Duration::milliseconds(100), bar_id.clone(), DatasetFlowType::Ingest).await; test_flow_listener - .make_a_snapshot(start_time + Duration::try_milliseconds(100).unwrap()) + .make_a_snapshot(start_time + Duration::milliseconds(100)) .await; // 150ms: finish - harness.advance_time(Duration::try_milliseconds(50).unwrap()).await; + harness.advance_time(Duration::milliseconds(50)).await; }; tokio::join!(task0_handle, task1_handle, main_handle) @@ -2745,7 +2751,7 @@ async fn test_dataset_deleted() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -2756,7 +2762,7 @@ async fn test_dataset_deleted() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(70).unwrap().into(), + schedule_condition: Duration::milliseconds(70).into(), }, ) .await; @@ -2769,7 +2775,7 @@ async fn test_dataset_deleted() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2785,8 +2791,8 @@ async fn test_dataset_deleted() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -2799,8 +2805,8 @@ async fn test_dataset_deleted() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), 
fetch_uncacheable: false @@ -2813,22 +2819,22 @@ async fn test_dataset_deleted() { // "foo": // - flow 0 scheduled at 0ms // - task 0 starts at 10ms, finishes at 20ms - // - flow 2 enqueued for 20ms + period = 70ms + // - flow 2 scheduled for 20ms + period = 70ms // "bar": // - flow 1 scheduled at 0ms // - task 1 starts at 20ms, finishes at 30ms - // - flow 3 enqueued for 30ms + period = 100ms + // - flow 3 scheduled for 30ms + period = 100ms // 50ms: deleting "foo" in QUEUED state - harness.advance_time(Duration::try_milliseconds(50).unwrap()).await; + harness.advance_time(Duration::milliseconds(50)).await; harness.delete_dataset(&foo_id).await; // 120ms: deleting "bar" in SCHEDULED state - harness.advance_time(Duration::try_milliseconds(70).unwrap()).await; + harness.advance_time(Duration::milliseconds(70)).await; harness.delete_dataset(&bar_id).await; // 140ms: finish - harness.advance_time(Duration::try_milliseconds(20).unwrap()).await; + harness.advance_time(Duration::milliseconds(20)).await; }; tokio::join!(task0_handle, task1_handle, main_handle) @@ -2947,7 +2953,7 @@ async fn test_task_completions_trigger_next_loop_on_success() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(40).unwrap().into(), + schedule_condition: Duration::milliseconds(40).into(), }, ) .await; @@ -2965,7 +2971,7 @@ async fn test_task_completions_trigger_next_loop_on_success() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -2981,8 +2987,8 @@ async fn test_task_completions_trigger_next_loop_on_success() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -2995,8 +3001,8 @@ async fn test_task_completions_trigger_next_loop_on_success() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Failed(TaskError::Empty))), + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Failed(TaskError::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -3009,8 +3015,8 @@ async fn test_task_completions_trigger_next_loop_on_success() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(baz_id.clone()), - run_since_start: Duration::try_milliseconds(30).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Cancelled)), + run_since_start: Duration::milliseconds(30), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Cancelled)), expected_logical_plan: 
LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: baz_id.clone(), fetch_uncacheable: false @@ -3024,18 +3030,18 @@ async fn test_task_completions_trigger_next_loop_on_success() { // "foo": // - flow 0 scheduled at 0ms // - task 0 starts at 10ms, finishes at 20ms - // - next flow 3 enqueued for 20ms + period = 60ms + // - next flow 3 scheduled for 20ms + period = 60ms // "bar": // - flow 1 scheduled at 0ms // - task 1 starts at 20ms, finishes at 30ms with failure - // - next flow not enqueued + // - next flow not scheduled // "baz": // - flow 2 scheduled at 0ms // - task 2 starts at 30ms, finishes at 40ms with cancellation - // - next flow not enqueued + // - next flow not scheduled // 80ms: the succeeded dataset schedule another update - harness.advance_time(Duration::try_milliseconds(80).unwrap()).await; + harness.advance_time(Duration::milliseconds(80)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, main_handle) @@ -3172,7 +3178,7 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(80).unwrap().into(), + schedule_condition: Duration::milliseconds(80).into(), }, ) .await; @@ -3182,7 +3188,7 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { harness.now_datetime(), bar_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(1, Duration::try_seconds(1).unwrap()).unwrap(), + TransformRule::new_checked(1, Duration::seconds(1)).unwrap(), ) .await; @@ -3197,7 +3203,7 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -3213,8 +3219,8 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -3227,9 +3233,9 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), // Send some PullResult with records to bypass batching condition - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"old-slice")), new_head: Multihash::from_digest_sha3_256(b"new-slice"), @@ -3247,9 +3253,9 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { 
task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), + run_since_start: Duration::milliseconds(110), // Send some PullResult with records to bypass batching condition - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"new-slice")), new_head: Multihash::from_digest_sha3_256(b"newest-slice"), @@ -3267,8 +3273,8 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { task_id: TaskID::new(3), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(130).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(130), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: bar_id.clone(), fetch_uncacheable: false @@ -3279,7 +3285,7 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(220).unwrap()).await; + harness.advance_time(Duration::milliseconds(220)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task3_handle, main_handle) @@ -3400,10 +3406,8 @@ async fn test_derived_dataset_triggered_initially_and_after_input_change() { #[test_log::test(tokio::test)] async fn test_throttling_manual_triggers() { let harness = FlowHarness::with_overrides(FlowHarnessOverrides { - awaiting_step: Some(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()), - mandatory_throttling_period: Some( - Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS * 10).unwrap(), - ), + awaiting_step: Some(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)), + mandatory_throttling_period: Some(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS * 10)), ..Default::default() }) .await; @@ -3428,7 +3432,7 @@ async fn test_throttling_manual_triggers() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -3442,7 +3446,7 @@ async fn test_throttling_manual_triggers() { // Manual trigger for "foo" at 20ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key.clone(), - run_since_start: Duration::try_milliseconds(20).unwrap(), + run_since_start: Duration::milliseconds(20), initiator_id: None, }); let trigger0_handle = trigger0_driver.run(); @@ -3450,7 +3454,7 @@ async fn test_throttling_manual_triggers() { // Manual trigger for "foo" at 30ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key.clone(), - run_since_start: Duration::try_milliseconds(30).unwrap(), + run_since_start: Duration::milliseconds(30), initiator_id: None, }); let trigger1_handle = trigger1_driver.run(); @@ -3458,7 +3462,7 @@ 
async fn test_throttling_manual_triggers() { // Manual trigger for "foo" at 70ms let trigger2_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(70).unwrap(), + run_since_start: Duration::milliseconds(70), initiator_id: None, }); let trigger2_handle = trigger2_driver.run(); @@ -3468,8 +3472,8 @@ async fn test_throttling_manual_triggers() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(40).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(40), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -3479,11 +3483,11 @@ async fn test_throttling_manual_triggers() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(100).unwrap()).await; + harness.advance_time(Duration::milliseconds(100)).await; test_flow_listener - .make_a_snapshot(start_time + Duration::try_milliseconds(100).unwrap()) + .make_a_snapshot(start_time + Duration::milliseconds(100)) .await; - harness.advance_time(Duration::try_milliseconds(70).unwrap()).await; + harness.advance_time(Duration::milliseconds(70)).await; }; tokio::join!(trigger0_handle, trigger1_handle, trigger2_handle, task0_handle, main_handle) @@ -3529,9 +3533,9 @@ async fn test_throttling_manual_triggers() { #[test_log::test(tokio::test)] async fn test_throttling_derived_dataset_with_2_parents() { let harness = FlowHarness::with_overrides(FlowHarnessOverrides { - awaiting_step: Some(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()), // 10ms, + awaiting_step: Some(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)), // 10ms, mandatory_throttling_period: Some( - Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS * 10).unwrap(), + Duration::milliseconds(SCHEDULING_ALIGNMENT_MS * 10), ), /* 100ms */ mock_dataset_changes: Some(MockDatasetChangesService::with_increment_since( DatasetIntervalIncrement { @@ -3577,7 +3581,7 @@ async fn test_throttling_derived_dataset_with_2_parents() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -3589,7 +3593,7 @@ async fn test_throttling_derived_dataset_with_2_parents() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(150).unwrap().into(), + schedule_condition: Duration::milliseconds(150).into(), }, ) .await; @@ -3599,7 +3603,7 @@ async fn test_throttling_derived_dataset_with_2_parents() { harness.now_datetime(), baz_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(1, Duration::try_hours(24).unwrap()).unwrap(), + TransformRule::new_checked(1, Duration::hours(24)).unwrap(), ) .await; @@ -3615,7 +3619,7 @@ async fn test_throttling_derived_dataset_with_2_parents() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -3631,8 
+3635,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-old-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice"), @@ -3650,8 +3654,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-old-slice")), new_head: Multihash::from_digest_sha3_256(b"fbar-new-slice"), @@ -3669,8 +3673,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(baz_id.clone()), - run_since_start: Duration::try_milliseconds(30).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(30), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: baz_id.clone(), fetch_uncacheable: false @@ -3683,8 +3687,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(3), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(130).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(130), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-newest-slice"), @@ -3702,8 +3706,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(4), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "5")]), dataset_id: Some(baz_id.clone()), - run_since_start: Duration::try_milliseconds(160).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(160), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: baz_id.clone(), fetch_uncacheable: false @@ 
-3716,8 +3720,8 @@ async fn test_throttling_derived_dataset_with_2_parents() { task_id: TaskID::new(5), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "4")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(190).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(190), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-new-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-newest-slice"), @@ -3744,29 +3748,29 @@ async fn test_throttling_derived_dataset_with_2_parents() { // - baz not queued as pending already, trigger recorded // - baz: // - task 2 starts at 30ms, finishes at 50ms, flow 2 completes at 50ms - // - no continuation enqueued + // - no continuation scheduled // Stage 1: foo runs next flow // - foo: // - flow 3 scheduled at 120ms // - task 3 starts at 130ms, finishes at 140ms, flow 3 completes at 140ms - // - baz flow 5 enqueued as derived for 150ms: max(140ms initiated, last attempt 50ms + throttling 100ms) - // - foo flow 6 enqueued for 240ms: 140ms initiated + max(period 50ms, throttling 100ms) + // - baz flow 5 scheduled as derived for 150ms: max(140ms initiated, last attempt 50ms + throttling 100ms) + // - foo flow 6 scheduled for 240ms: 140ms initiated + max(period 50ms, throttling 100ms) // Stage 2: baz executes triggered by foo // - baz: // - flow 5 scheduled at 150ms // - task 4 starts at 160ms, finishes at 170ms, flow 5 completes at 170ms - // - no continuation enqueued + // - no continuation scheduled // Stage 3: bar runs next flow // - bar // - flow 4 schedules at 180ms // - task 5 starts at 190ms, finishes at 200ms, flow 4 completes at 200ms - // - baz flow 7 enqueued as derived for 270ms: max(200ms initiated, last attempt 170ms + hrottling 100ms) - // - bar flow 8 enqueued for 350ms: 200ms initiated + max (period 150ms, throttling 100ms) + // - baz flow 7 scheduled as derived for 270ms: max(200ms initiated, last attempt 170ms + throttling 100ms) + // - bar flow 8 scheduled for 350ms: 200ms initiated + max (period 150ms, throttling 100ms) - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task3_handle, task4_handle, task5_handle, main_handle) @@ -4064,7 +4068,7 @@ async fn test_batching_condition_records_reached() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -4074,7 +4078,7 @@ async fn test_batching_condition_records_reached() { harness.now_datetime(), bar_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(10, Duration::try_milliseconds(120).unwrap()).unwrap(), + TransformRule::new_checked(10, Duration::milliseconds(120)).unwrap(), ) .await; @@ -4089,7 +4093,7 @@ async fn test_batching_condition_records_reached() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers
script @@ -4105,8 +4109,8 @@ async fn test_batching_condition_records_reached() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-old-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice"), @@ -4124,8 +4128,8 @@ async fn test_batching_condition_records_reached() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-old-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice"), @@ -4143,8 +4147,8 @@ async fn test_batching_condition_records_reached() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(80).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(80), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-2"), @@ -4162,8 +4166,8 @@ async fn test_batching_condition_records_reached() { task_id: TaskID::new(3), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "4")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(150).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(150), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice-2")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-3"), @@ -4181,8 +4185,8 @@ async fn test_batching_condition_records_reached() { task_id: TaskID::new(4), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(170).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(170), + finish_in_with: Some((Duration::milliseconds(10), 
TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-new-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice-2"), @@ -4197,7 +4201,7 @@ async fn test_batching_condition_records_reached() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task3_handle, task4_handle, main_handle) @@ -4394,7 +4398,7 @@ async fn test_batching_condition_timeout() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -4404,7 +4408,7 @@ async fn test_batching_condition_timeout() { harness.now_datetime(), bar_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(10, Duration::try_milliseconds(150).unwrap()).unwrap(), + TransformRule::new_checked(10, Duration::milliseconds(150)).unwrap(), ) .await; @@ -4419,7 +4423,7 @@ async fn test_batching_condition_timeout() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -4435,8 +4439,8 @@ async fn test_batching_condition_timeout() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-old-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice"), @@ -4454,8 +4458,8 @@ async fn test_batching_condition_timeout() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-old-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice"), @@ -4473,8 +4477,8 @@ async fn test_batching_condition_timeout() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(80).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(80), + finish_in_with: Some((Duration::milliseconds(10), 
TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-2"), @@ -4494,8 +4498,8 @@ async fn test_batching_condition_timeout() { task_id: TaskID::new(4), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(250).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(250), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-new-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice-2"), @@ -4510,7 +4514,7 @@ async fn test_batching_condition_timeout() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task4_handle, main_handle) @@ -4675,7 +4679,7 @@ async fn test_batching_condition_watermark() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(40).unwrap().into(), + schedule_condition: Duration::milliseconds(40).into(), }, ) .await; @@ -4685,7 +4689,7 @@ async fn test_batching_condition_watermark() { harness.now_datetime(), bar_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(10, Duration::try_milliseconds(200).unwrap()).unwrap(), + TransformRule::new_checked(10, Duration::milliseconds(200)).unwrap(), ) .await; @@ -4700,7 +4704,7 @@ async fn test_batching_condition_watermark() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -4716,8 +4720,8 @@ async fn test_batching_condition_watermark() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-old-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice"), @@ -4735,8 +4739,8 @@ async fn test_batching_condition_watermark() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), 
TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-old-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice"), @@ -4754,8 +4758,8 @@ async fn test_batching_condition_watermark() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(70).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(70), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-2"), @@ -4775,8 +4779,8 @@ async fn test_batching_condition_watermark() { task_id: TaskID::new(4), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(290).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(290), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-new-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice-2"), @@ -4791,7 +4795,7 @@ async fn test_batching_condition_watermark() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task4_handle, main_handle) @@ -5013,7 +5017,7 @@ async fn test_batching_condition_with_2_inputs() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(80).unwrap().into(), + schedule_condition: Duration::milliseconds(80).into(), }, ) .await; @@ -5025,7 +5029,7 @@ async fn test_batching_condition_with_2_inputs() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(120).unwrap().into(), + schedule_condition: Duration::milliseconds(120).into(), }, ) .await; @@ -5035,7 +5039,7 @@ async fn test_batching_condition_with_2_inputs() { harness.now_datetime(), baz_id.clone(), DatasetFlowType::ExecuteTransform, - TransformRule::new_checked(15, Duration::try_milliseconds(200).unwrap()).unwrap(), + TransformRule::new_checked(15, Duration::milliseconds(200)).unwrap(), ) .await; @@ -5051,7 +5055,7 @@ async fn test_batching_condition_with_2_inputs() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -5067,8 +5071,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: 
Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-old-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice"), @@ -5086,8 +5090,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(20).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(20), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-old-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice"), @@ -5105,8 +5109,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(2), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "2")]), dataset_id: Some(baz_id.clone()), - run_since_start: Duration::try_milliseconds(30).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(30), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"baz-old-slice")), new_head: Multihash::from_digest_sha3_256(b"baz-new-slice"), @@ -5124,8 +5128,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(3), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "3")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(110).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(110), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-2"), @@ -5143,8 +5147,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(4), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "4")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(160).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(160), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"bar-new-slice")), new_head: Multihash::from_digest_sha3_256(b"bar-new-slice-2"), @@ -5162,8 +5166,8 @@ async fn 
test_batching_condition_with_2_inputs() { task_id: TaskID::new(5), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "6")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(210).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { + run_since_start: Duration::milliseconds(210), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult { pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"foo-new-slice")), new_head: Multihash::from_digest_sha3_256(b"foo-new-slice-2"), @@ -5181,8 +5185,8 @@ async fn test_batching_condition_with_2_inputs() { task_id: TaskID::new(6), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "5")]), dataset_id: Some(baz_id.clone()), - run_since_start: Duration::try_milliseconds(230).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ + run_since_start: Duration::milliseconds(230), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::UpdateDatasetResult(TaskUpdateDatasetResult{ pull_result: PullResult::Updated { old_head: Some(Multihash::from_digest_sha3_256(b"baz-new-slice")), new_head: Multihash::from_digest_sha3_256(b"baz-new-slice-2"), @@ -5197,7 +5201,7 @@ async fn test_batching_condition_with_2_inputs() { // Main simulation script let main_handle = async { - harness.advance_time(Duration::try_milliseconds(400).unwrap()).await; + harness.advance_time(Duration::milliseconds(400)).await; }; tokio::join!(task0_handle, task1_handle, task2_handle, task3_handle, task4_handle, task5_handle, task6_handle, main_handle) @@ -5522,7 +5526,7 @@ async fn test_list_all_flow_initiators() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -5538,8 +5542,8 @@ async fn test_list_all_flow_initiators() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: foo_id.clone(), max_slice_size: None, @@ -5553,8 +5557,8 @@ async fn test_list_all_flow_initiators() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(60).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(60), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: bar_id.clone(), max_slice_size: None, @@ -5567,7 +5571,7 @@ async fn test_list_all_flow_initiators() { // Manual 
trigger for "foo" at 10ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: Some(foo_account_id.clone()), }); let trigger0_handle = trigger0_driver.run(); @@ -5575,7 +5579,7 @@ async fn test_list_all_flow_initiators() { // Manual trigger for "bar" at 50ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: bar_flow_key, - run_since_start: Duration::try_milliseconds(50).unwrap(), + run_since_start: Duration::milliseconds(50), initiator_id: Some(bar_account_id.clone()), }); let trigger1_handle = trigger1_driver.run(); @@ -5589,7 +5593,7 @@ async fn test_list_all_flow_initiators() { // - flow 1 trigger and finishes // - task 1 starts at 60ms, finishes at 70ms (leave some gap to fight with random order) - harness.advance_time(Duration::try_milliseconds(100).unwrap()).await; + harness.advance_time(Duration::milliseconds(100)).await; }; tokio::join!(task0_handle, task1_handle, trigger0_handle, trigger1_handle, main_handle) @@ -5689,7 +5693,7 @@ async fn test_list_all_datasets_with_flow() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -5705,8 +5709,8 @@ async fn test_list_all_datasets_with_flow() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: foo_id.clone(), max_slice_size: None, @@ -5720,8 +5724,8 @@ async fn test_list_all_datasets_with_flow() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(bar_id.clone()), - run_since_start: Duration::try_milliseconds(60).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(60), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::HardCompactionDataset(HardCompactionDataset { dataset_id: bar_id.clone(), max_slice_size: None, @@ -5734,7 +5738,7 @@ async fn test_list_all_datasets_with_flow() { // Manual trigger for "foo" at 10ms let trigger0_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: foo_flow_key, - run_since_start: Duration::try_milliseconds(10).unwrap(), + run_since_start: Duration::milliseconds(10), initiator_id: Some(foo_account_id.clone()), }); let trigger0_handle = trigger0_driver.run(); @@ -5742,7 +5746,7 @@ async fn test_list_all_datasets_with_flow() { // Manual trigger for "bar" at 50ms let trigger1_driver = harness.manual_flow_trigger_driver(ManualFlowTriggerArgs { flow_key: bar_flow_key, - run_since_start: Duration::try_milliseconds(50).unwrap(), + run_since_start: Duration::milliseconds(50), initiator_id: Some(bar_account_id.clone()), }); let 
trigger1_handle = trigger1_driver.run(); @@ -5756,7 +5760,7 @@ async fn test_list_all_datasets_with_flow() { // - flow 1 trigger and finishes // - task 1 starts at 60ms, finishes at 70ms (leave some gap to fight with random order) - harness.advance_time(Duration::try_milliseconds(100).unwrap()).await; + harness.advance_time(Duration::milliseconds(100)).await; }; tokio::join!(task0_handle, task1_handle, trigger0_handle, trigger1_handle, main_handle) @@ -5835,7 +5839,7 @@ async fn test_abort_flow_before_scheduling_tasks() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(100).unwrap().into(), + schedule_condition: Duration::milliseconds(100).into(), }, ) .await; @@ -5844,7 +5848,7 @@ async fn test_abort_flow_before_scheduling_tasks() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual aborts script @@ -5860,8 +5864,8 @@ async fn test_abort_flow_before_scheduling_tasks() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -5872,11 +5876,11 @@ async fn test_abort_flow_before_scheduling_tasks() { // Manual abort for "foo" at 80ms let abort0_driver = harness.manual_flow_abort_driver(ManualFlowAbortArgs { flow_id: FlowID::new(1), - abort_since_start: Duration::try_milliseconds(80).unwrap(), + abort_since_start: Duration::milliseconds(80), }); let abort0_handle = abort0_driver.run(); - let sim_handle = harness.advance_time(Duration::try_milliseconds(150).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(150)); tokio::join!(foo_task0_handle, abort0_handle, sim_handle) } => Ok(()) } @@ -5938,7 +5942,7 @@ async fn test_abort_flow_after_scheduling_still_waiting_for_executor() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -5947,7 +5951,7 @@ async fn test_abort_flow_after_scheduling_still_waiting_for_executor() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -5963,8 +5967,8 @@ async fn test_abort_flow_after_scheduling_still_waiting_for_executor() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(10).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(10), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: 
LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -5975,11 +5979,11 @@ async fn test_abort_flow_after_scheduling_still_waiting_for_executor() { // Manual abort for "foo" at 90ms let abort0_driver = harness.manual_flow_abort_driver(ManualFlowAbortArgs { flow_id: FlowID::new(1), - abort_since_start: Duration::try_milliseconds(90).unwrap(), + abort_since_start: Duration::milliseconds(90), }); let abort0_handle = abort0_driver.run(); - let sim_handle = harness.advance_time(Duration::try_milliseconds(150).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(150)); tokio::join!(foo_task0_handle, abort0_handle, sim_handle) } => Ok(()) } @@ -6046,7 +6050,7 @@ async fn test_abort_flow_after_task_running_has_started() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -6055,7 +6059,7 @@ async fn test_abort_flow_after_task_running_has_started() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -6071,8 +6075,8 @@ async fn test_abort_flow_after_task_running_has_started() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(100).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(100), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -6083,11 +6087,11 @@ async fn test_abort_flow_after_task_running_has_started() { // Manual abort for "foo" at 50ms, which is within task run period let abort0_driver = harness.manual_flow_abort_driver(ManualFlowAbortArgs { flow_id: FlowID::new(0), - abort_since_start: Duration::try_milliseconds(50).unwrap(), + abort_since_start: Duration::milliseconds(50), }); let abort0_handle = abort0_driver.run(); - let sim_handle = harness.advance_time(Duration::try_milliseconds(150).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(150)); tokio::join!(foo_task0_handle, abort0_handle, sim_handle) } => Ok(()) } @@ -6143,7 +6147,7 @@ async fn test_abort_flow_after_task_finishes() { DatasetFlowType::Ingest, IngestRule { fetch_uncacheable: false, - schedule_condition: Duration::try_milliseconds(50).unwrap().into(), + schedule_condition: Duration::milliseconds(50).into(), }, ) .await; @@ -6152,7 +6156,7 @@ async fn test_abort_flow_after_task_finishes() { // Remember start time let start_time = harness .now_datetime() - .duration_round(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()) + .duration_round(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)) .unwrap(); // Run scheduler concurrently with manual triggers script @@ -6168,8 +6172,8 @@ async fn test_abort_flow_after_task_finishes() { task_id: TaskID::new(0), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "0")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(10).unwrap(), - finish_in_with: 
Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(10), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -6182,8 +6186,8 @@ async fn test_abort_flow_after_task_finishes() { task_id: TaskID::new(1), task_metadata: TaskMetadata::from(vec![(METADATA_TASK_FLOW_ID, "1")]), dataset_id: Some(foo_id.clone()), - run_since_start: Duration::try_milliseconds(90).unwrap(), - finish_in_with: Some((Duration::try_milliseconds(20).unwrap(), TaskOutcome::Success(TaskResult::Empty))), + run_since_start: Duration::milliseconds(90), + finish_in_with: Some((Duration::milliseconds(20), TaskOutcome::Success(TaskResult::Empty))), expected_logical_plan: LogicalPlan::UpdateDataset(UpdateDataset { dataset_id: foo_id.clone(), fetch_uncacheable: false @@ -6194,11 +6198,11 @@ async fn test_abort_flow_after_task_finishes() { // Manual abort for "foo" at 50ms, which is after flow 0 has finished, and before flow 1 has started let abort0_driver = harness.manual_flow_abort_driver(ManualFlowAbortArgs { flow_id: FlowID::new(0), - abort_since_start: Duration::try_milliseconds(50).unwrap(), + abort_since_start: Duration::milliseconds(50), }); let abort0_handle = abort0_driver.run(); - let sim_handle = harness.advance_time(Duration::try_milliseconds(150).unwrap()); + let sim_handle = harness.advance_time(Duration::milliseconds(150)); tokio::join!(foo_task0_handle, foo_task1_handle, abort0_handle, sim_handle) } => Ok(()) } diff --git a/src/domain/flow-system/services/tests/tests/utils/flow_harness_shared.rs b/src/domain/flow-system/services/tests/tests/utils/flow_harness_shared.rs index 6a62946b15..c87d8c7e63 100644 --- a/src/domain/flow-system/services/tests/tests/utils/flow_harness_shared.rs +++ b/src/domain/flow-system/services/tests/tests/utils/flow_harness_shared.rs @@ -94,11 +94,14 @@ impl FlowHarness { let awaiting_step = overrides .awaiting_step - .unwrap_or(Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap()); + .unwrap_or(Duration::milliseconds(SCHEDULING_ALIGNMENT_MS)); - let mandatory_throttling_period = overrides.mandatory_throttling_period.unwrap_or( - Duration::try_milliseconds(SCHEDULING_MANDATORY_THROTTLING_PERIOD_MS).unwrap(), - ); + let mandatory_throttling_period = + overrides + .mandatory_throttling_period + .unwrap_or(Duration::milliseconds( + SCHEDULING_MANDATORY_THROTTLING_PERIOD_MS, + )); let mock_dataset_changes = overrides.mock_dataset_changes.unwrap_or_default(); @@ -443,7 +446,7 @@ impl FlowHarness { pub async fn advance_time(&self, time_quantum: Duration) { self.advance_time_custom_alignment( - Duration::try_milliseconds(SCHEDULING_ALIGNMENT_MS).unwrap(), + Duration::milliseconds(SCHEDULING_ALIGNMENT_MS), time_quantum, ) .await; diff --git a/src/domain/flow-system/services/tests/tests/utils/flow_system_test_listener.rs b/src/domain/flow-system/services/tests/tests/utils/flow_system_test_listener.rs index e64edc23b1..5e5518a235 100644 --- a/src/domain/flow-system/services/tests/tests/utils/flow_system_test_listener.rs +++ b/src/domain/flow-system/services/tests/tests/utils/flow_system_test_listener.rs @@ -262,7 +262,7 @@ impl MessageConsumerT for FlowSystemTestListener { FlowProgressMessage::Running(e) => self.make_a_snapshot(e.event_time).await, FlowProgressMessage::Finished(e) => self.make_a_snapshot(e.event_time).await, 
FlowProgressMessage::Cancelled(e) => self.make_a_snapshot(e.event_time).await, - FlowProgressMessage::Enqueued(_) => {} + FlowProgressMessage::Scheduled(_) => {} } Ok(()) } diff --git a/src/infra/core/src/repos/object_repository_s3.rs b/src/infra/core/src/repos/object_repository_s3.rs index 9b0f191ded..d0b6e95915 100644 --- a/src/infra/core/src/repos/object_repository_s3.rs +++ b/src/infra/core/src/repos/object_repository_s3.rs @@ -168,9 +168,7 @@ where hash: &Multihash, opts: ExternalTransferOpts, ) -> Result { - let expires_in = opts - .expiration - .unwrap_or(chrono::Duration::try_seconds(3600).unwrap()); + let expires_in = opts.expiration.unwrap_or(chrono::Duration::seconds(3600)); let presigned_conf = PresigningConfig::builder() .expires_in(expires_in.to_std().unwrap()) @@ -200,9 +198,7 @@ where hash: &Multihash, opts: ExternalTransferOpts, ) -> Result { - let expires_in = opts - .expiration - .unwrap_or(chrono::Duration::try_seconds(3600).unwrap()); + let expires_in = opts.expiration.unwrap_or(chrono::Duration::seconds(3600)); let presigned_conf = PresigningConfig::builder() .expires_in(expires_in.to_std().unwrap()) diff --git a/src/infra/flow-system/inmem/src/flow/inmem_flow_event_store.rs b/src/infra/flow-system/inmem/src/flow/inmem_flow_event_store.rs index 1c2c37b488..7217d235b1 100644 --- a/src/infra/flow-system/inmem/src/flow/inmem_flow_event_store.rs +++ b/src/infra/flow-system/inmem/src/flow/inmem_flow_event_store.rs @@ -8,8 +8,9 @@ // by the Apache License, Version 2.0. use std::collections::hash_map::Entry; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use chrono::{DateTime, Utc}; use database_common::PaginationOpts; use dill::*; use kamu_flow_system::{BorrowedFlowKeyDataset, *}; @@ -34,6 +35,8 @@ struct State { flow_key_by_flow_id: HashMap, dataset_flow_last_run_stats: HashMap, system_flow_last_run_stats: HashMap, + flows_by_scheduled_for_activation_time: BTreeMap, BTreeSet>, + scheduled_for_activation_time_by_flow_id: HashMap>, } impl State { @@ -258,6 +261,57 @@ impl InMemoryFlowEventStore { }; } } + + // Manage scheduled time changes- insertions + if let FlowEvent::ScheduledForActivation(e) = &event { + // Remove any possible previous enqueuing + Self::remove_flow_scheduling_record(state, e.flow_id); + // make new record + Self::insert_flow_scheduling_record(state, e.flow_id, e.scheduled_for_activation_at); + } + // and removals + else if let FlowEvent::Aborted(_) | FlowEvent::TaskScheduled(_) = &event { + let flow_id = event.flow_id(); + Self::remove_flow_scheduling_record(state, flow_id); + } + } + + fn insert_flow_scheduling_record( + state: &mut State, + flow_id: FlowID, + scheduled_for_activation_at: DateTime, + ) { + // Update direct lookup + state + .flows_by_scheduled_for_activation_time + .entry(scheduled_for_activation_at) + .and_modify(|flow_ids| { + flow_ids.insert(flow_id); + }) + .or_insert_with(|| BTreeSet::from([flow_id])); + + // Update reverse lookup + state + .scheduled_for_activation_time_by_flow_id + .insert(flow_id, scheduled_for_activation_at); + } + + fn remove_flow_scheduling_record(state: &mut State, flow_id: FlowID) { + if let Some(scheduled_for_activation_at) = state + .scheduled_for_activation_time_by_flow_id + .remove(&flow_id) + { + let flow_ids = state + .flows_by_scheduled_for_activation_time + .get_mut(&scheduled_for_activation_at) + .unwrap(); + flow_ids.remove(&flow_id); + if flow_ids.is_empty() { + state + .flows_by_scheduled_for_activation_time + 
.remove(&scheduled_for_activation_at); + } + } } } @@ -389,6 +443,30 @@ impl FlowEventStore for InMemoryFlowEventStore { .unwrap_or_default()) } + /// Returns nearest time when one or more flows are scheduled for activation + async fn nearest_flow_activation_moment(&self) -> Result>, InternalError> { + let state = self.inner.as_state(); + let g = state.lock().unwrap(); + Ok(g.flows_by_scheduled_for_activation_time + .keys() + .next() + .copied()) + } + + /// Returns flows scheduled for activation at the given time + async fn get_flows_scheduled_for_activation_at( + &self, + scheduled_for_activation_at: DateTime, + ) -> Result, InternalError> { + let state = self.inner.as_state(); + let g = state.lock().unwrap(); + + Ok(g.flows_by_scheduled_for_activation_time + .get(&scheduled_for_activation_at) + .map(|flow_ids| flow_ids.iter().copied().collect()) + .unwrap_or_default()) + } + #[tracing::instrument(level = "debug", skip_all, fields(%dataset_id, ?filters, ?pagination))] fn get_all_flow_ids_by_dataset( &self, diff --git a/src/infra/flow-system/inmem/tests/tests/test_inmem_flow_event_store.rs b/src/infra/flow-system/inmem/tests/tests/test_inmem_flow_event_store.rs index 622d48b3d4..5d7cd80868 100644 --- a/src/infra/flow-system/inmem/tests/tests/test_inmem_flow_event_store.rs +++ b/src/infra/flow-system/inmem/tests/tests/test_inmem_flow_event_store.rs @@ -260,6 +260,33 @@ database_transactional_test!( harness = InMemoryFlowEventStoreHarness ); +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = inmem, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_at_different_stages_through_success_path, + harness = InMemoryFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = inmem, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_when_aborted_before_activation, + harness = InMemoryFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = inmem, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_multiple_flows, + harness = InMemoryFlowEventStoreHarness +); + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Harness //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/infra/flow-system/postgres/.sqlx/query-03a5ca688456ac1c619ed1fba97da4ff4638450031bf974ec57055d7755c8e81.json b/src/infra/flow-system/postgres/.sqlx/query-03a5ca688456ac1c619ed1fba97da4ff4638450031bf974ec57055d7755c8e81.json new file mode 100644 index 0000000000..052ea94250 --- /dev/null +++ b/src/infra/flow-system/postgres/.sqlx/query-03a5ca688456ac1c619ed1fba97da4ff4638450031bf974ec57055d7755c8e81.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT f.flow_id as flow_id\n FROM flows f\n WHERE\n f.scheduled_for_activation_at = $1 AND\n f.flow_status = 'waiting'::flow_status_type\n ORDER BY f.flow_id ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "flow_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + 
"Timestamptz" + ] + }, + "nullable": [ + false + ] + }, + "hash": "03a5ca688456ac1c619ed1fba97da4ff4638450031bf974ec57055d7755c8e81" +} diff --git a/src/infra/flow-system/postgres/.sqlx/query-eb2fd83f0a62ed5546e4af19b9a6f9fe8019e7bc1b5dc7ef650af0ef9886aeb5.json b/src/infra/flow-system/postgres/.sqlx/query-66f90f578e5c0d0e4b40fc058ffd9ed5ab8492ca105655daa6d0fd9beb02c7b1.json similarity index 57% rename from src/infra/flow-system/postgres/.sqlx/query-eb2fd83f0a62ed5546e4af19b9a6f9fe8019e7bc1b5dc7ef650af0ef9886aeb5.json rename to src/infra/flow-system/postgres/.sqlx/query-66f90f578e5c0d0e4b40fc058ffd9ed5ab8492ca105655daa6d0fd9beb02c7b1.json index dfae77db2f..b14bed550d 100644 --- a/src/infra/flow-system/postgres/.sqlx/query-eb2fd83f0a62ed5546e4af19b9a6f9fe8019e7bc1b5dc7ef650af0ef9886aeb5.json +++ b/src/infra/flow-system/postgres/.sqlx/query-66f90f578e5c0d0e4b40fc058ffd9ed5ab8492ca105655daa6d0fd9beb02c7b1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE flows\n SET flow_status = $2, last_event_id = $3\n WHERE flow_id = $1 AND (\n last_event_id IS NULL AND CAST($4 as BIGINT) IS NULL OR\n last_event_id IS NOT NULL AND CAST($4 as BIGINT) IS NOT NULL AND last_event_id = $4\n )\n RETURNING flow_id\n ", + "query": "\n UPDATE flows\n SET flow_status = $2, last_event_id = $3, scheduled_for_activation_at = $4\n WHERE flow_id = $1 AND (\n last_event_id IS NULL AND CAST($5 as BIGINT) IS NULL OR\n last_event_id IS NOT NULL AND CAST($5 as BIGINT) IS NOT NULL AND last_event_id = $5\n )\n RETURNING flow_id\n ", "describe": { "columns": [ { @@ -25,6 +25,7 @@ } }, "Int8", + "Timestamptz", "Int8" ] }, @@ -32,5 +33,5 @@ false ] }, - "hash": "eb2fd83f0a62ed5546e4af19b9a6f9fe8019e7bc1b5dc7ef650af0ef9886aeb5" + "hash": "66f90f578e5c0d0e4b40fc058ffd9ed5ab8492ca105655daa6d0fd9beb02c7b1" } diff --git a/src/infra/flow-system/postgres/.sqlx/query-ed899492a3b7dc735cd0dda739a42850899165f03d6bcd851884ac973a76587c.json b/src/infra/flow-system/postgres/.sqlx/query-ed899492a3b7dc735cd0dda739a42850899165f03d6bcd851884ac973a76587c.json new file mode 100644 index 0000000000..4ee656a9ab --- /dev/null +++ b/src/infra/flow-system/postgres/.sqlx/query-ed899492a3b7dc735cd0dda739a42850899165f03d6bcd851884ac973a76587c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT f.scheduled_for_activation_at as activation_time\n FROM flows f\n WHERE\n f.scheduled_for_activation_at IS NOT NULL AND\n f.flow_status = 'waiting'::flow_status_type\n ORDER BY f.scheduled_for_activation_at ASC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "activation_time", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true + ] + }, + "hash": "ed899492a3b7dc735cd0dda739a42850899165f03d6bcd851884ac973a76587c" +} diff --git a/src/infra/flow-system/postgres/Cargo.toml b/src/infra/flow-system/postgres/Cargo.toml index 4ef56964db..3e9e8b4c59 100644 --- a/src/infra/flow-system/postgres/Cargo.toml +++ b/src/infra/flow-system/postgres/Cargo.toml @@ -40,6 +40,7 @@ sqlx = { version = "0.8", default-features = false, features = [ "chrono" ] } tokio-stream = { version = "0.1", default-features = false } +tracing = { version = "0.1", default-features = false } [dev-dependencies] database-common-macros = { workspace = true } diff --git a/src/infra/flow-system/postgres/src/postgres_flow_event_store.rs b/src/infra/flow-system/postgres/src/postgres_flow_event_store.rs index e5020dfeb4..b92cffe8e1 100644 --- 
a/src/infra/flow-system/postgres/src/postgres_flow_event_store.rs +++ b/src/infra/flow-system/postgres/src/postgres_flow_event_store.rs @@ -9,6 +9,7 @@ use std::collections::HashSet; +use chrono::{DateTime, Utc}; use database_common::{PaginationOpts, TransactionRef, TransactionRefT}; use dill::*; use futures::TryStreamExt; @@ -105,13 +106,19 @@ impl PostgresFlowEventStore { // Determine if we have a status change between these events let mut maybe_latest_status = None; + let mut maybe_scheduled_for_activation_at = None; for event in events { if let Some(new_status) = event.new_status() { maybe_latest_status = Some(new_status); } + if let FlowEvent::ScheduledForActivation(e) = event { + maybe_scheduled_for_activation_at = Some(e.scheduled_for_activation_at); + } else if let FlowEvent::Aborted(_) | FlowEvent::TaskScheduled(_) = event { + maybe_scheduled_for_activation_at = None; + } } - // We either have determined the lateststatus, or should read the previous + // We either have determined the latest status, or should read the previous let latest_status = if let Some(latest_status) = maybe_latest_status { latest_status } else { @@ -134,16 +141,17 @@ impl PostgresFlowEventStore { let rows = sqlx::query!( r#" UPDATE flows - SET flow_status = $2, last_event_id = $3 + SET flow_status = $2, last_event_id = $3, scheduled_for_activation_at = $4 WHERE flow_id = $1 AND ( - last_event_id IS NULL AND CAST($4 as BIGINT) IS NULL OR - last_event_id IS NOT NULL AND CAST($4 as BIGINT) IS NOT NULL AND last_event_id = $4 + last_event_id IS NULL AND CAST($5 as BIGINT) IS NULL OR + last_event_id IS NOT NULL AND CAST($5 as BIGINT) IS NOT NULL AND last_event_id = $5 ) RETURNING flow_id "#, flow_id, latest_status as FlowStatus, last_event_id, + maybe_scheduled_for_activation_at, maybe_prev_stored_event_id, ) .fetch_all(connection_mut) @@ -488,6 +496,61 @@ impl FlowEventStore for PostgresFlowEventStore { }) } + /// Returns nearest time when one or more flows are scheduled for activation + async fn nearest_flow_activation_moment(&self) -> Result>, InternalError> { + let mut tr = self.transaction.lock().await; + + let connection_mut = tr.connection_mut().await?; + let maybe_activation_time = sqlx::query!( + r#" + SELECT f.scheduled_for_activation_at as activation_time + FROM flows f + WHERE + f.scheduled_for_activation_at IS NOT NULL AND + f.flow_status = 'waiting'::flow_status_type + ORDER BY f.scheduled_for_activation_at ASC + LIMIT 1 + "#, + ) + .map(|result| { + result + .activation_time + .expect("NULL values filtered by query") + }) + .fetch_optional(connection_mut) + .await + .int_err()?; + + Ok(maybe_activation_time) + } + + /// Returns flows scheduled for activation at the given time + async fn get_flows_scheduled_for_activation_at( + &self, + scheduled_for_activation_at: DateTime, + ) -> Result, InternalError> { + let mut tr = self.transaction.lock().await; + + let connection_mut = tr.connection_mut().await?; + let flow_ids = sqlx::query!( + r#" + SELECT f.flow_id as flow_id + FROM flows f + WHERE + f.scheduled_for_activation_at = $1 AND + f.flow_status = 'waiting'::flow_status_type + ORDER BY f.flow_id ASC + "#, + scheduled_for_activation_at, + ) + .map(|row| FlowID::try_from(row.flow_id).unwrap()) + .fetch_all(connection_mut) + .await + .int_err()?; + + Ok(flow_ids) + } + fn get_all_flow_ids_by_dataset( &self, dataset_id: &DatasetID, diff --git a/src/infra/flow-system/postgres/tests/tests/test_postgres_flow_event_store.rs b/src/infra/flow-system/postgres/tests/tests/test_postgres_flow_event_store.rs 
index 69cedb2e06..8f6a1e6f55 100644 --- a/src/infra/flow-system/postgres/tests/tests/test_postgres_flow_event_store.rs +++ b/src/infra/flow-system/postgres/tests/tests/test_postgres_flow_event_store.rs @@ -262,6 +262,33 @@ database_transactional_test!( harness = PostgresFlowEventStoreHarness ); +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = postgres, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_at_different_stages_through_success_path, + harness = PostgresFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = postgres, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_when_aborted_before_activation, + harness = PostgresFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = postgres, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_multiple_flows, + harness = PostgresFlowEventStoreHarness +); + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Harness //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/infra/flow-system/repo-tests/src/test_flow_event_store.rs b/src/infra/flow-system/repo-tests/src/test_flow_event_store.rs index d1cccd5600..a767be6a09 100644 --- a/src/infra/flow-system/repo-tests/src/test_flow_event_store.rs +++ b/src/infra/flow-system/repo-tests/src/test_flow_event_store.rs @@ -11,7 +11,7 @@ use std::assert_matches::assert_matches; use std::collections::HashSet; use std::sync::Arc; -use chrono::{Duration, Utc}; +use chrono::{Duration, SubsecRound, Utc}; use database_common::PaginationOpts; use dill::Catalog; use futures::TryStreamExt; @@ -1807,6 +1807,382 @@ pub async fn test_event_store_concurrent_modification(catalog: &Catalog) { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +pub async fn test_flow_activation_visibility_at_different_stages_through_success_path( + catalog: &Catalog, +) { + let event_store = catalog.get_one::().unwrap(); + + let flow_id = event_store.new_flow_id().await.unwrap(); + let dataset_id = DatasetID::new_seeded_ed25519(b"foo"); + let flow_key = FlowKey::dataset(dataset_id, DatasetFlowType::Ingest); + + let start_moment = Utc::now().trunc_subsecs(6); + let activation_moment = start_moment + Duration::minutes(1); + + let last_event_id = event_store + .save_events( + &flow_id, + None, + vec![FlowEventInitiated { + event_time: start_moment, + flow_key, + flow_id, + trigger: FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_moment, + }), + config_snapshot: None, + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert!(maybe_nearest_activation_time.is_none()); + + let last_event_id = event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventStartConditionUpdated { + flow_id, + event_time: Utc::now(), + start_condition: 
FlowStartCondition::Schedule(FlowStartConditionSchedule { + wake_up_at: activation_moment, + }), + last_trigger_index: 0, + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert!(maybe_nearest_activation_time.is_none()); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![] + ); + + let last_event_id = event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventScheduledForActivation { + flow_id, + event_time: Utc::now(), + scheduled_for_activation_at: activation_moment, + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert_eq!(maybe_nearest_activation_time, Some(activation_moment)); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![flow_id] + ); + + let last_event_id = event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventTaskScheduled { + flow_id, + event_time: activation_moment + Duration::milliseconds(100), + task_id: TaskID::new(1), + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert!(maybe_nearest_activation_time.is_none()); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![] + ); + + let last_event_id = event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventTaskRunning { + flow_id, + event_time: activation_moment + Duration::milliseconds(500), + task_id: TaskID::new(1), + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert!(maybe_nearest_activation_time.is_none()); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![] + ); + + event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventTaskFinished { + flow_id, + event_time: activation_moment + Duration::milliseconds(1500), + task_id: TaskID::new(1), + task_outcome: TaskOutcome::Success(TaskResult::Empty), + } + .into()], + ) + .await + .unwrap(); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +pub async fn test_flow_activation_visibility_when_aborted_before_activation(catalog: &Catalog) { + let event_store = catalog.get_one::().unwrap(); + + let flow_id = event_store.new_flow_id().await.unwrap(); + let dataset_id = DatasetID::new_seeded_ed25519(b"foo"); + let flow_key = FlowKey::dataset(dataset_id, DatasetFlowType::Ingest); + + let start_moment = Utc::now().trunc_subsecs(6); + let activation_moment = start_moment + Duration::minutes(1); + let abortion_moment = start_moment + Duration::seconds(30); + + let last_event_id = event_store + .save_events( + &flow_id, + None, + vec![ + FlowEventInitiated { + event_time: start_moment, + flow_key, + flow_id, + trigger: FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_moment, + }), + config_snapshot: None, + } + .into(), + FlowEventStartConditionUpdated { + flow_id, + event_time: Utc::now(), + start_condition: FlowStartCondition::Schedule(FlowStartConditionSchedule { + wake_up_at: activation_moment, + }), + last_trigger_index: 0, + } + .into(), + 
FlowEventScheduledForActivation { + flow_id, + event_time: Utc::now(), + scheduled_for_activation_at: activation_moment, + } + .into(), + ], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert_eq!(maybe_nearest_activation_time, Some(activation_moment)); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![flow_id] + ); + + event_store + .save_events( + &flow_id, + Some(last_event_id), + vec![FlowEventAborted { + event_time: abortion_moment, + flow_id, + } + .into()], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert!(maybe_nearest_activation_time.is_none()); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment) + .await + .unwrap(), + vec![] + ); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +pub async fn test_flow_activation_multiple_flows(catalog: &Catalog) { + let event_store = catalog.get_one::().unwrap(); + + let dataset_id_foo = DatasetID::new_seeded_ed25519(b"foo"); + let dataset_id_bar = DatasetID::new_seeded_ed25519(b"bar"); + let dataset_id_baz = DatasetID::new_seeded_ed25519(b"baz"); + + let flow_id_foo = event_store.new_flow_id().await.unwrap(); + let flow_id_bar = event_store.new_flow_id().await.unwrap(); + let flow_id_baz = event_store.new_flow_id().await.unwrap(); + + let flow_key_foo = FlowKey::dataset(dataset_id_foo, DatasetFlowType::Ingest); + let flow_key_bar = FlowKey::dataset(dataset_id_bar, DatasetFlowType::Ingest); + let flow_key_baz = FlowKey::dataset(dataset_id_baz, DatasetFlowType::Ingest); + + let start_moment = Utc::now().trunc_subsecs(6); + let activation_moment_1 = start_moment + Duration::minutes(1); + let activation_moment_2 = start_moment + Duration::minutes(2); + + event_store + .save_events( + &flow_id_foo, + None, + vec![ + FlowEventInitiated { + event_time: start_moment, + flow_key: flow_key_foo, + flow_id: flow_id_foo, + trigger: FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_moment, + }), + config_snapshot: None, + } + .into(), + FlowEventStartConditionUpdated { + flow_id: flow_id_foo, + event_time: Utc::now(), + start_condition: FlowStartCondition::Schedule(FlowStartConditionSchedule { + wake_up_at: activation_moment_1, + }), + last_trigger_index: 0, + } + .into(), + FlowEventScheduledForActivation { + flow_id: flow_id_foo, + event_time: Utc::now(), + scheduled_for_activation_at: activation_moment_1, + } + .into(), + ], + ) + .await + .unwrap(); + + event_store + .save_events( + &flow_id_bar, + None, + vec![ + FlowEventInitiated { + event_time: start_moment, + flow_key: flow_key_bar, + flow_id: flow_id_bar, + trigger: FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_moment, + }), + config_snapshot: None, + } + .into(), + FlowEventStartConditionUpdated { + flow_id: flow_id_bar, + event_time: Utc::now(), + start_condition: FlowStartCondition::Schedule(FlowStartConditionSchedule { + wake_up_at: activation_moment_1, + }), + last_trigger_index: 0, + } + .into(), + FlowEventScheduledForActivation { + flow_id: flow_id_bar, + event_time: Utc::now(), + scheduled_for_activation_at: activation_moment_1, + } + .into(), + ], + ) + .await + .unwrap(); + + event_store + .save_events( + &flow_id_baz, + None, + vec![ + FlowEventInitiated { + event_time: start_moment, + 
flow_key: flow_key_baz, + flow_id: flow_id_baz, + trigger: FlowTrigger::AutoPolling(FlowTriggerAutoPolling { + trigger_time: start_moment, + }), + config_snapshot: None, + } + .into(), + FlowEventStartConditionUpdated { + flow_id: flow_id_baz, + event_time: Utc::now(), + start_condition: FlowStartCondition::Schedule(FlowStartConditionSchedule { + wake_up_at: activation_moment_2, + }), + last_trigger_index: 0, + } + .into(), + FlowEventScheduledForActivation { + flow_id: flow_id_baz, + event_time: Utc::now(), + scheduled_for_activation_at: activation_moment_2, + } + .into(), + ], + ) + .await + .unwrap(); + + let maybe_nearest_activation_time = event_store.nearest_flow_activation_moment().await.unwrap(); + assert_eq!(maybe_nearest_activation_time, Some(activation_moment_1)); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment_1) + .await + .unwrap(), + vec![flow_id_foo, flow_id_bar] + ); + assert_eq!( + event_store + .get_flows_scheduled_for_activation_at(activation_moment_2) + .await + .unwrap(), + vec![flow_id_baz] + ); +} + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + struct DatasetTestCase { dataset_id: DatasetID, ingest_flow_ids: TestFlowIDs, @@ -2100,7 +2476,7 @@ impl<'a> DatasetFlowGenerator<'a> { let flow_id: u64 = flow.flow_id.into(); flow.on_task_finished( - flow.timing.running_since.unwrap() + Duration::try_minutes(10).unwrap(), + flow.timing.running_since.unwrap() + Duration::minutes(10), TaskID::new(flow_id * 2 + 1), outcome, ) @@ -2169,7 +2545,7 @@ impl SystemFlowGenerator { let flow_id: u64 = flow.flow_id.into(); flow.on_task_finished( - flow.timing.running_since.unwrap() + Duration::try_minutes(10).unwrap(), + flow.timing.running_since.unwrap() + Duration::minutes(10), TaskID::new(flow_id * 2 + 1), outcome, ) @@ -2185,26 +2561,32 @@ fn drive_flow_to_status(flow: &mut Flow, expected_status: FlowStatus) { let start_moment = Utc::now(); flow.set_relevant_start_condition( - start_moment + Duration::try_seconds(1).unwrap(), + start_moment + Duration::seconds(1), FlowStartCondition::Schedule(FlowStartConditionSchedule { - wake_up_at: start_moment + Duration::try_minutes(1).unwrap(), + wake_up_at: start_moment + Duration::minutes(1), }), ) .unwrap(); + flow.schedule_for_activation( + start_moment + Duration::seconds(1), + start_moment + Duration::minutes(1), + ) + .unwrap(); + if expected_status != FlowStatus::Waiting { // Derived task id from flow id just to ensure unique values let flow_id: u64 = flow.flow_id.into(); let task_id = TaskID::new(flow_id * 2 + 1); - flow.on_task_scheduled(start_moment + Duration::try_minutes(5).unwrap(), task_id) + flow.on_task_scheduled(start_moment + Duration::minutes(5), task_id) .unwrap(); - flow.on_task_running(start_moment + Duration::try_minutes(7).unwrap(), task_id) + flow.on_task_running(start_moment + Duration::minutes(7), task_id) .unwrap(); if expected_status == FlowStatus::Finished { flow.on_task_finished( - start_moment + Duration::try_minutes(10).unwrap(), + start_moment + Duration::minutes(10), task_id, TaskOutcome::Success(TaskResult::Empty), ) diff --git a/src/infra/flow-system/sqlite/.sqlx/query-379da668e4d617c8bd2f384a45ac74f18c4e37e81cb05d69181caaff1d6f45fc.json b/src/infra/flow-system/sqlite/.sqlx/query-379da668e4d617c8bd2f384a45ac74f18c4e37e81cb05d69181caaff1d6f45fc.json new file mode 100644 index 0000000000..9605b99e52 --- /dev/null +++ 
b/src/infra/flow-system/sqlite/.sqlx/query-379da668e4d617c8bd2f384a45ac74f18c4e37e81cb05d69181caaff1d6f45fc.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n UPDATE flows\n SET flow_status = $2, last_event_id = $3, scheduled_for_activation_at = $4\n WHERE flow_id = $1 AND (\n last_event_id IS NULL AND CAST($5 as INT8) IS NULL OR\n last_event_id IS NOT NULL AND CAST($5 as INT8) IS NOT NULL AND last_event_id = $5\n )\n RETURNING flow_id\n ", + "describe": { + "columns": [ + { + "name": "flow_id", + "ordinal": 0, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 5 + }, + "nullable": [ + false + ] + }, + "hash": "379da668e4d617c8bd2f384a45ac74f18c4e37e81cb05d69181caaff1d6f45fc" +} diff --git a/src/infra/flow-system/sqlite/.sqlx/query-7686af1119ee85b9019d157497b0508f8fdc9f47e03ea87dd34214513b392a3c.json b/src/infra/flow-system/sqlite/.sqlx/query-7686af1119ee85b9019d157497b0508f8fdc9f47e03ea87dd34214513b392a3c.json new file mode 100644 index 0000000000..58d67866c9 --- /dev/null +++ b/src/infra/flow-system/sqlite/.sqlx/query-7686af1119ee85b9019d157497b0508f8fdc9f47e03ea87dd34214513b392a3c.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT f.scheduled_for_activation_at as \"activation_time: _\"\n FROM flows f\n WHERE\n f.scheduled_for_activation_at IS NOT NULL AND\n f.flow_status = 'waiting'\n ORDER BY f.scheduled_for_activation_at ASC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "name": "activation_time: _", + "ordinal": 0, + "type_info": "Null" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + true + ] + }, + "hash": "7686af1119ee85b9019d157497b0508f8fdc9f47e03ea87dd34214513b392a3c" +} diff --git a/src/infra/flow-system/sqlite/.sqlx/query-c7b1895e06e6920f7868b251c0e92e604e9881d2c02cce177a7e03945c334018.json b/src/infra/flow-system/sqlite/.sqlx/query-c7b1895e06e6920f7868b251c0e92e604e9881d2c02cce177a7e03945c334018.json deleted file mode 100644 index 515d5e55bd..0000000000 --- a/src/infra/flow-system/sqlite/.sqlx/query-c7b1895e06e6920f7868b251c0e92e604e9881d2c02cce177a7e03945c334018.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "SQLite", - "query": "\n UPDATE flows\n SET flow_status = $2, last_event_id = $3\n WHERE flow_id = $1 AND (\n last_event_id IS NULL AND CAST($4 as INT8) IS NULL OR\n last_event_id IS NOT NULL AND CAST($4 as INT8) IS NOT NULL AND last_event_id = $4\n )\n RETURNING flow_id\n ", - "describe": { - "columns": [ - { - "name": "flow_id", - "ordinal": 0, - "type_info": "Integer" - } - ], - "parameters": { - "Right": 4 - }, - "nullable": [ - false - ] - }, - "hash": "c7b1895e06e6920f7868b251c0e92e604e9881d2c02cce177a7e03945c334018" -} diff --git a/src/infra/flow-system/sqlite/.sqlx/query-deeb6fe445c44ad051ad615751e9970c70bd270c7fc4a444039ef7699c7dd95e.json b/src/infra/flow-system/sqlite/.sqlx/query-deeb6fe445c44ad051ad615751e9970c70bd270c7fc4a444039ef7699c7dd95e.json new file mode 100644 index 0000000000..a984670424 --- /dev/null +++ b/src/infra/flow-system/sqlite/.sqlx/query-deeb6fe445c44ad051ad615751e9970c70bd270c7fc4a444039ef7699c7dd95e.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT f.flow_id as flow_id\n FROM flows f\n WHERE\n f.scheduled_for_activation_at = $1 AND\n f.flow_status = 'waiting'\n ORDER BY f.flow_id ASC\n ", + "describe": { + "columns": [ + { + "name": "flow_id", + "ordinal": 0, + "type_info": "Integer" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false + ] + }, + "hash": "deeb6fe445c44ad051ad615751e9970c70bd270c7fc4a444039ef7699c7dd95e" +} diff --git 
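The two new SQLite queries above back the activation lookups exercised by the shared repo tests earlier in this patch. As a rough usage sketch only (the wrapper function, imports, and crate paths are assumptions for illustration; only `nearest_flow_activation_moment` and `get_flows_scheduled_for_activation_at`, implemented further below in `sqlite_flow_event_store.rs`, come from this patch), an executor tick might combine the two lookups like this:

use chrono::{DateTime, Utc};
use internal_error::InternalError;
use kamu_flow_system::{FlowEventStore, FlowID};

// Hypothetical helper, not part of this patch: returns the flows whose
// scheduled activation moment has already been reached.
async fn flows_due_now(
    flow_event_store: &dyn FlowEventStore,
    now: DateTime<Utc>,
) -> Result<Vec<FlowID>, InternalError> {
    // Earliest `scheduled_for_activation_at` among flows still in 'waiting' status
    if let Some(activation_moment) = flow_event_store.nearest_flow_activation_moment().await? {
        if activation_moment <= now {
            // All flows due at exactly that moment (ordered by flow_id, per the query above)
            return flow_event_store
                .get_flows_scheduled_for_activation_at(activation_moment)
                .await;
        }
    }
    Ok(vec![])
}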
a/src/infra/flow-system/sqlite/Cargo.toml b/src/infra/flow-system/sqlite/Cargo.toml index 4f058db268..b4ef51fd58 100644 --- a/src/infra/flow-system/sqlite/Cargo.toml +++ b/src/infra/flow-system/sqlite/Cargo.toml @@ -40,6 +40,7 @@ sqlx = { version = "0.8", default-features = false, features = [ "chrono" ] } tokio-stream = { version = "0.1", default-features = false } +tracing = { version = "0.1", default-features = false } [dev-dependencies] database-common-macros = { workspace = true } diff --git a/src/infra/flow-system/sqlite/src/sqlite_flow_event_store.rs b/src/infra/flow-system/sqlite/src/sqlite_flow_event_store.rs index 14c9ee98bc..7e1e7c0508 100644 --- a/src/infra/flow-system/sqlite/src/sqlite_flow_event_store.rs +++ b/src/infra/flow-system/sqlite/src/sqlite_flow_event_store.rs @@ -120,10 +120,16 @@ impl SqliteFlowEventStore { // Determine if we have a status change between these events let mut maybe_latest_status = None; + let mut maybe_scheduled_for_activation_at = None; for event in events { if let Some(new_status) = event.new_status() { maybe_latest_status = Some(new_status); } + if let FlowEvent::ScheduledForActivation(e) = event { + maybe_scheduled_for_activation_at = Some(e.scheduled_for_activation_at); + } else if let FlowEvent::Aborted(_) | FlowEvent::TaskScheduled(_) = event { + maybe_scheduled_for_activation_at = None; + } } // We either have determined the lateststatus, or should read the previous @@ -149,16 +155,17 @@ impl SqliteFlowEventStore { let rows = sqlx::query!( r#" UPDATE flows - SET flow_status = $2, last_event_id = $3 + SET flow_status = $2, last_event_id = $3, scheduled_for_activation_at = $4 WHERE flow_id = $1 AND ( - last_event_id IS NULL AND CAST($4 as INT8) IS NULL OR - last_event_id IS NOT NULL AND CAST($4 as INT8) IS NOT NULL AND last_event_id = $4 + last_event_id IS NULL AND CAST($5 as INT8) IS NULL OR + last_event_id IS NOT NULL AND CAST($5 as INT8) IS NOT NULL AND last_event_id = $5 ) RETURNING flow_id "#, flow_id, latest_status, last_event_id, + maybe_scheduled_for_activation_at, maybe_prev_stored_event_id, ) .fetch_all(connection_mut) @@ -542,6 +549,68 @@ impl FlowEventStore for SqliteFlowEventStore { }) } + /// Returns nearest time when one or more flows are scheduled for activation + async fn nearest_flow_activation_moment(&self) -> Result>, InternalError> { + let mut tr = self.transaction.lock().await; + + #[derive(Debug, sqlx::FromRow, PartialEq, Eq)] + #[allow(dead_code)] + pub struct ActivationRow { + pub activation_time: Option>, + } + + let connection_mut = tr.connection_mut().await?; + let maybe_activation_time = sqlx::query_as!( + ActivationRow, + r#" + SELECT f.scheduled_for_activation_at as "activation_time: _" + FROM flows f + WHERE + f.scheduled_for_activation_at IS NOT NULL AND + f.flow_status = 'waiting' + ORDER BY f.scheduled_for_activation_at ASC + LIMIT 1 + "#, + ) + .map(|result| { + result + .activation_time + .expect("NULL values filtered by query") + }) + .fetch_optional(connection_mut) + .await + .int_err()?; + + Ok(maybe_activation_time) + } + + /// Returns flows scheduled for activation at the given time + async fn get_flows_scheduled_for_activation_at( + &self, + scheduled_for_activation_at: DateTime, + ) -> Result, InternalError> { + let mut tr = self.transaction.lock().await; + + let connection_mut = tr.connection_mut().await?; + let flow_ids = sqlx::query!( + r#" + SELECT f.flow_id as flow_id + FROM flows f + WHERE + f.scheduled_for_activation_at = $1 AND + f.flow_status = 'waiting' + ORDER BY f.flow_id ASC + "#, + 
scheduled_for_activation_at, + ) + .map(|row| FlowID::try_from(row.flow_id).unwrap()) + .fetch_all(connection_mut) + .await + .int_err()?; + + Ok(flow_ids) + } + fn get_all_flow_ids_by_dataset( &self, dataset_id: &DatasetID, diff --git a/src/infra/flow-system/sqlite/tests/tests/test_sqlite_flow_event_store.rs b/src/infra/flow-system/sqlite/tests/tests/test_sqlite_flow_event_store.rs index 115c676aa0..28769ae920 100644 --- a/src/infra/flow-system/sqlite/tests/tests/test_sqlite_flow_event_store.rs +++ b/src/infra/flow-system/sqlite/tests/tests/test_sqlite_flow_event_store.rs @@ -262,6 +262,33 @@ database_transactional_test!( harness = SqliteFlowEventStoreHarness ); +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = sqlite, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_at_different_stages_through_success_path, + harness = SqliteFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = sqlite, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_visibility_when_aborted_before_activation, + harness = SqliteFlowEventStoreHarness +); + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +database_transactional_test!( + storage = sqlite, + fixture = + kamu_flow_system_repo_tests::test_flow_event_store::test_flow_activation_multiple_flows, + harness = SqliteFlowEventStoreHarness +); + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Harness //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/utils/time-source/tests/tests/test_time_source.rs b/src/utils/time-source/tests/tests/test_time_source.rs index 15c1db701a..a517c820b8 100644 --- a/src/utils/time-source/tests/tests/test_time_source.rs +++ b/src/utils/time-source/tests/tests/test_time_source.rs @@ -23,12 +23,12 @@ async fn test_fake_sleep_stable_order() { let system_time_source = FakeSystemTimeSource::new(t0); let mut sleep_futures = vec![ - Some(system_time_source.sleep(Duration::try_seconds(120).unwrap())), // 1 - Some(system_time_source.sleep(Duration::try_seconds(60).unwrap())), // 2 - Some(system_time_source.sleep(Duration::try_seconds(90).unwrap())), // 3 - Some(system_time_source.sleep(Duration::try_seconds(120).unwrap())), // 4 - Some(system_time_source.sleep(Duration::try_seconds(150).unwrap())), // 5 - Some(system_time_source.sleep(Duration::try_seconds(60).unwrap())), // 6 + Some(system_time_source.sleep(Duration::seconds(120))), // 1 + Some(system_time_source.sleep(Duration::seconds(60))), // 2 + Some(system_time_source.sleep(Duration::seconds(90))), // 3 + Some(system_time_source.sleep(Duration::seconds(120))), // 4 + Some(system_time_source.sleep(Duration::seconds(150))), // 5 + Some(system_time_source.sleep(Duration::seconds(60))), // 6 ]; assert_eq!( @@ -36,9 +36,9 @@ async fn test_fake_sleep_stable_order() { [false, false, false, false, false, false] ); - let dt = Duration::try_seconds(150).unwrap(); + let dt = Duration::seconds(150); let t = t0 + dt; - let ready_future_ids = system_time_source.advance(Duration::try_seconds(150).unwrap()); + let ready_future_ids = 
system_time_source.advance(Duration::seconds(150)); assert_eq!( check_futures_for_completion(&mut sleep_futures), @@ -53,7 +53,7 @@ async fn test_fake_sleep_without_simulate_time_passage() { let t0 = point_in_time_in_a_parallel_universe(); let system_time_source = FakeSystemTimeSource::new(t0); - let wake_after_1_sec_fut = system_time_source.sleep(Duration::try_seconds(1).unwrap()); + let wake_after_1_sec_fut = system_time_source.sleep(Duration::seconds(1)); let sleep_result_or_timeout = timeout(StdDuration::from_millis(50), wake_after_1_sec_fut).await; assert_matches!(sleep_result_or_timeout, Err(_)); @@ -65,13 +65,11 @@ async fn test_fake_sleep_with_lacking_simulate_time_passage() { let t0 = point_in_time_in_a_parallel_universe(); let system_time_source = FakeSystemTimeSource::new(t0); - let mut sleep_futures = vec![Some( - system_time_source.sleep(Duration::try_seconds(60).unwrap()), - )]; + let mut sleep_futures = vec![Some(system_time_source.sleep(Duration::seconds(60)))]; assert_eq!(check_futures_for_completion(&mut sleep_futures), [false]); - let dt = Duration::try_seconds(30).unwrap(); + let dt = Duration::seconds(30); let t = t0 + dt; system_time_source.advance(dt); @@ -91,10 +89,10 @@ async fn test_fake_sleep_with_several_simulate_time_passage() { // - c) 120 seconds // - d) 120 seconds let mut sleep_futures = vec![ - Some(system_time_source.sleep(Duration::try_seconds(60).unwrap())), - Some(system_time_source.sleep(Duration::try_seconds(90).unwrap())), - Some(system_time_source.sleep(Duration::try_seconds(120).unwrap())), - Some(system_time_source.sleep(Duration::try_seconds(120).unwrap())), + Some(system_time_source.sleep(Duration::seconds(60))), + Some(system_time_source.sleep(Duration::seconds(90))), + Some(system_time_source.sleep(Duration::seconds(120))), + Some(system_time_source.sleep(Duration::seconds(120))), ]; assert_eq!( @@ -107,7 +105,7 @@ async fn test_fake_sleep_with_several_simulate_time_passage() { // - b) +00:30/01:30 - waiting // - c) +00:30/02:00 - waiting // - d) +00:30/02:00 - waiting - let dt = Duration::try_seconds(30).unwrap(); + let dt = Duration::seconds(30); let t = t0 + dt; system_time_source.advance(dt); @@ -123,7 +121,7 @@ async fn test_fake_sleep_with_several_simulate_time_passage() { // - b) +01:00/01:30 - waiting // - c) +01:00/02:00 - waiting // - d) +01:00/02:00 - waiting - let dt = Duration::try_seconds(30).unwrap(); + let dt = Duration::seconds(30); let t = t + dt; system_time_source.advance(dt); @@ -139,7 +137,7 @@ async fn test_fake_sleep_with_several_simulate_time_passage() { // - b) +01:30/01:30 - done // - c) +01:30/02:00 - waiting // - d) +01:30/02:00 - waiting - let dt = Duration::try_seconds(30).unwrap(); + let dt = Duration::seconds(30); let t = t + dt; system_time_source.advance(dt); @@ -155,7 +153,7 @@ async fn test_fake_sleep_with_several_simulate_time_passage() { // - b) +01:30/01:30 - done (before) // - c) +01:30/02:00 - done // - d) +01:30/02:00 - done - let dt = Duration::try_seconds(30).unwrap(); + let dt = Duration::seconds(30); let t = t + dt; system_time_source.advance(dt); @@ -175,8 +173,8 @@ async fn test_fake_sleep_with_simulate_exceeding_passage() { // - a) 60 seconds // - b) 90 seconds let mut sleep_futures = vec![ - Some(system_time_source.sleep(Duration::try_seconds(60).unwrap())), - Some(system_time_source.sleep(Duration::try_seconds(90).unwrap())), + Some(system_time_source.sleep(Duration::seconds(60))), + Some(system_time_source.sleep(Duration::seconds(90))), ]; assert_eq!( @@ -187,7 +185,7 @@ async fn 
test_fake_sleep_with_simulate_exceeding_passage() { // 2. Simulate +90 seconds: // - a) +01:30/01:00 - 30-second overrun // - b) +01:30/01:30 - done - let dt = Duration::try_seconds(90).unwrap(); + let dt = Duration::seconds(90); let t = t0 + dt; system_time_source.advance(dt);
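The time-source test changes above (and the flow test changes earlier in this patch) replace `Duration::try_seconds(...).unwrap()` / `Duration::try_minutes(...).unwrap()` with the plain `Duration::seconds(...)` / `Duration::minutes(...)` constructors. A small illustrative check, assuming chrono 0.4 as used by the workspace: for the in-range constants used in these tests the forms are equivalent, since the `try_*` variants only differ by returning `None` instead of panicking on overflow.

use chrono::Duration;

fn main() {
    // Equivalent for in-range inputs; `try_*` exists only to avoid a panic on overflow
    assert_eq!(Duration::try_seconds(90), Some(Duration::seconds(90)));
    assert_eq!(Duration::try_minutes(10), Some(Duration::minutes(10)));
}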