From 4fa74735125c7efaf19f6424f0f8e5661f9400da Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Mon, 6 Jan 2025 23:06:49 +0200 Subject: [PATCH 01/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- examples/examples_jobs_engine.h | 104 +++++++++++++++++++++++++++----- include/impl/jobs_item_impl.h | 1 + include/jobs_config.h | 9 +++ include/jobs_engine.h | 14 +++++ 4 files changed, 112 insertions(+), 16 deletions(-) diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index faa97cb..5620a21 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -16,14 +16,36 @@ namespace examples::jobs_engine { { std::cout << "Jobs Engine example 1\n"; + // + // create a complex example for each type 1,2,3 (like web requests) + // - priorities + // - dependency calls (parent-child relationship) + // (with children for calls that take a long time, like database access, or less time, like a cache server) + // - coalesce calls (multiple (later) calls resolved when the first one is called) + // - timeout + // - throttle (call with some wait-sleep interval in between) + // + // type1 will create 2 children and will be finished when at least 1 is finished + // (this will demonstrate OR and a custom children function) + // type2 will create 2 children and will be finished when ALL children are finished + // (this will demonstrate the default case AND and the default children function) + // type3 will have no children and one call will timeout + // enum class JobsType { kJobsType1, - kJobsType2 + kJobsType2, + kJobsType3, + kJobsDatabase, + kJobsCache, }; + enum class JobsGroupType { - kJobsGroup1 + kJobsGroup12, + kJobsGroup3, + kJobsGroupDatabase, + kJobsGroupCache, }; using Request = std::pair<int, std::string>; using JobsEng = small::jobs_engine; auto jobs_processing_function = [](const std::vector<std::shared_ptr<JobsEng::JobsItem>> &items) { std::cout << "this function is defined without the engine params, called for " << (int)items[0]->type << "\n"; }; + // + // config object for priorities, groups, types, threads, timeouts, sleeps, etc + // JobsEng::JobsConfig config{ - .m_engine = {.m_threads_count = 0 /*don't start any thread yet*/, - .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, - {small::EnumPriorities::kHigh, 2}, - {small::EnumPriorities::kNormal, 2}, - {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities - .m_default_processing_function = jobs_processing_function, // default processing function, better use jobs.add_default_processing_function to set it - .m_groups = { - {JobsGroupType::kJobsGroup1, {.m_threads_count = 1}}}, // config by jobs group - .m_types = { - {JobsType::kJobsType1, {.m_group = JobsGroupType::kJobsGroup1}}, - {JobsType::kJobsType2, {.m_group = JobsGroupType::kJobsGroup1}}, - }}; - - // create jobs engine + .m_engine = {.m_threads_count = 0 /*don't start any thread yet*/, // TODO add thread count for wait_for_children processing and finished, default 1/2 here use 1 due to coalesce + .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, + {small::EnumPriorities::kHigh, 2}, + {small::EnumPriorities::kNormal, 2}, + {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities + + .m_default_processing_function = jobs_processing_function, // default processing function, better use jobs.add_default_processing_function to set it + + .m_groups = {{JobsGroupType::kJobsGroup12, {.m_threads_count = 1}}, // config by jobs group // TODO add sleep_between_requests +
{JobsGroupType::kJobsGroup3, {.m_threads_count = 1}}, + {JobsGroupType::kJobsGroupDatabase, {.m_threads_count = 1}}, + {JobsGroupType::kJobsGroupCache, {.m_threads_count = 1}}}, + + .m_types = {{JobsType::kJobsType1, {.m_group = JobsGroupType::kJobsGroup12}}, // + {JobsType::kJobsType2, {.m_group = JobsGroupType::kJobsGroup12}}, // + {JobsType::kJobsType3, {.m_group = JobsGroupType::kJobsGroup3}}, // TODO add timeout for job + {JobsType::kJobsDatabase, {.m_group = JobsGroupType::kJobsGroupDatabase}}, // TODO add timeout for job + {JobsType::kJobsCache, {.m_group = JobsGroupType::kJobsGroupCache}}}}; // TODO add timeout for job + + // + // create jobs engine with the above config + // JobsEng jobs(config); + // TODO add config as param so the sleep after can be overridden jobs.add_default_processing_function([](auto &j /*this jobs engine*/, const auto &jobs_items) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " DEFAULT processing " << "{" << " type=" << (int)item->type << " req.int=" << item->request.first << "," << " req.str=\"" << item->request.second << "\"" << "}" << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) << "\n"; } small::sleep(30); }); + // TODO add config as param so the sleep after can be overridden // add specific function for job1 (calling the function from jobs instead of config allows to pass the engine and extra param) jobs.add_job_processing_function(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " JOB1 processing " << "{" << " type=" << (int)item->type << " req.int=" << item->request.first << "," << " req.str=\"" << item->request.second << "\"" << "}" << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) << "\n"; + // TODO add 2 more children jobs for the current one for database and server cache + // TODO save somewhere in an unordered_map the database requests - the problem is that jobid is received after push_jobs + // TODO save type1 requests into a promises unordered_map + // TODO for type 2 only database (add another processing function) } small::sleep(30); }, 5 /*param b*/); + // TODO if I wanted to use another job_server, how do I model it so that the job on this side stays in a state as if it had children, and + // TODO a request is made somewhere else, and when that one completes, on finish (or, if it is a worker thread, in the processing function) it sets the state + // TODO set state works if there is only one dependency; if there are several, a custom function would be needed - childProcessing (whether or not it has children - or how to make a dummy child - maybe with thread_count 0?)
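// A minimal sketch of the "promises unordered_map" idea from the TODO above, shown here as an
// illustration only, not as part of the patch. It uses only the queue().push_back API shown in
// this example; the map name `type1_promises`, the string payload, and the finished hook that
// would resolve the promise are assumptions (a finished callback does not exist yet).
// Requires <future> and <unordered_map>.
std::unordered_map<JobsEng::JobsID, std::promise<std::string>> type1_promises;

JobsEng::JobsID tracked_id{};
jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, {7, "tracked"}, &tracked_id);
auto tracked_result = type1_promises[tracked_id].get_future(); // wait on this future once the job is done
// in a future finished hook (hypothetical): type1_promises[item->id].set_value(item->response.second);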
+ + // add specific function for job2 + jobs.add_job_processing_function(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items) { + for (auto &item : jobs_items) { + std::cout << "thread " << std::this_thread::get_id() + << " JOB2 processing " + << "{" + << " type=" << (int)item->type + << " req.int=" << item->request.first << "," + << " req.str=\"" << item->request.second << "\"" + << "}" + << " ref count " << item.use_count() + << " time " << small::toISOString(small::timeNow()) + << "\n"; + // TODO for type 2 only database children (add another processing function) + } + // TODO config to wait after request (even if it is not specified in the global config - so custom throttle) + small::sleep(30); }); + + // TODO add function for database where demonstrate coalesce of 3 items (sleep 1000) + // TODO add function for cache server - no coalesce for demo purposes (sleep 500) so 3rd parent items is finished due to database and not cache server + + // TODO add function for custom wait children and demonstrate set progress to another item + // TODO add function for custom finished (for type1 to set the promises completed) + JobsEng::JobsID jobs_id{}; std::vector jobs_ids; + // TODO create a promises/futures unordered_map for type1 requests and wait later + + // show coalesce for children database requests + std::unordered_map web_requests; + // push jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, {1, "normal"}, &jobs_id); jobs.queue().push_back(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high"}, &jobs_id); diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 684d5e1..01e6bed 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -33,6 +33,7 @@ namespace small::jobsimpl { std::atomic progress{}; // progress 0-100 for state kInProgress JobsRequestT request{}; // request needed for processing function JobsResponseT response{}; // where the results are saved (for the finished callback if exists) + // TODO add parents and children ids explicit jobs_item() = default; explicit jobs_item(const JobsID &jobs_id, const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) diff --git a/include/jobs_config.h b/include/jobs_config.h index 81d95dc..6c6a60f 100644 --- a/include/jobs_config.h +++ b/include/jobs_config.h @@ -18,6 +18,8 @@ namespace small { { using JobsItem = typename small::jobsimpl::jobs_item; using ProcessingFunction = std::function> &)>; + // TODO add WaitChildrenFunction + // TODO add FinishedFunction // config for the entire jobs engine struct ConfigJobsEngine @@ -32,6 +34,8 @@ namespace small { JobsGroupT m_group{}; // job type group (multiple job types can be configured to same group) bool m_has_processing_function{false}; // use default processing function ProcessingFunction m_processing_function{}; // processing Function + // TODO add WaitChildrenFunction + // TODO add FinishedFunction }; // config for the job group (where job types can be grouped) @@ -62,6 +66,8 @@ namespace small { it_f->second.m_has_processing_function = true; it_f->second.m_processing_function = processing_function; } + // TODO add job WaitChildrenFunction + // TODO add job FinishedFunction inline void apply_default_processing_function() { @@ -71,5 +77,8 @@ namespace small { } } } + + // TODO apply WaitChildrenFunction - the function must be passed as parameter from engine + // TODO apply FinishedFunction - the function must be passed as parameter from engine }; } // namespace small diff --git 
a/include/jobs_engine.h b/include/jobs_engine.h index 9fb17b0..93bd940 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -252,6 +252,8 @@ namespace small { // setup jobs types m_config.apply_default_processing_function(); + // TODO apply default WaitChildrenFunction - pass the function as parameter using std::bind(this) + // TODO apply default FinishedFunction - pass the function as parameter using std::bind(this) for (auto &[jobs_type, jobs_type_config] : m_config.m_types) { m_queue.add_jobs_type(jobs_type, jobs_type_config.m_group); } @@ -301,6 +303,7 @@ namespace small { for (auto &jobs_item : jobs_items) { elems_by_type[jobs_item->type].reserve(jobs_items.size()); elems_by_type[jobs_item->type].push_back(jobs_item); + // TODO mark the items as in progress } } @@ -311,17 +314,28 @@ namespace small { continue; } + // TODO pass the config parameter // process specific jobs by type it_cfg_type->second.m_processing_function(jobs); + + // TODO marks the items as either wait for children (if it has children) or finished + // TODO put in proper thread for processing children and finished work (1/2 thread(s) for each - better to have a config for it?) + // TODO the worker thread is configured for jobgroup, children and finished are not part of those - a solution is to add a pair or internal_group } + // TODO move this delete on the finished thread for (auto &jobs_id : vec_ids) { m_queue.jobs_del(jobs_id); } + // TODO for sleep after requests use worker_thread delay item -> check if has_items should be set properly + // TODO if sleep is set set has_items to true to force the sleep, but also a last time sleep so if there too much time and are no items dont continue + return ret; } + // TODO external set state for a job moves it to proper wait for children or finished + // // inner function for activate the jobs from queue // From b74225b7b71dd68888c6c01bbc4f6d5d057beca7 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 7 Jan 2025 21:03:32 +0200 Subject: [PATCH 02/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- README.md | 4 +- include/jobs_config.h | 135 +++++++++++++++++++++++++++++++----------- 2 files changed, 104 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 0e2a308..622f49b 100644 --- a/README.md +++ b/README.md @@ -427,7 +427,7 @@ JobsEng::JobsConfig config{ // create jobs engine JobsEng jobs(config); ... -jobs.add_default_processing_function([](auto &j /*this jobs engine*/, const auto &jobs_items) { +jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { for (auto &item : jobs_items) { ... } @@ -435,7 +435,7 @@ jobs.add_default_processing_function([](auto &j /*this jobs engine*/, const auto }); ... // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) -jobs.add_job_processing_function(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { +jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { for (auto &item : jobs_items) { ... 
} diff --git a/include/jobs_config.h b/include/jobs_config.h index 6c6a60f..a0f1770 100644 --- a/include/jobs_config.h +++ b/include/jobs_config.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include "prio_queue.h" @@ -16,69 +17,137 @@ namespace small { template struct jobs_config { - using JobsItem = typename small::jobsimpl::jobs_item; - using ProcessingFunction = std::function> &)>; - // TODO add WaitChildrenFunction - // TODO add FinishedFunction + using JobsItem = typename small::jobsimpl::jobs_item; // config for the entire jobs engine struct ConfigJobsEngine { - int m_threads_count{8}; // how many total threads for processing + int m_threads_count{8}; // how many total threads for processing + int m_threads_count_finished{2}; // how many threads (out of total m_threads_count) to use for processing finished states small::config_prio_queue m_config_prio{}; }; + // config for the job group (where job types can be grouped) + struct ConfigJobsGroup + { + int m_threads_count{1}; // how many threads for processing (out of the global threads) + int m_bulk_count{1}; // how many objects are processed at once + std::optional m_delay_next_request{}; // if need to delay the next request processing to have some throtelling + }; + + // to be passed to processing function + struct ConfigProcessing + { + std::optional m_delay_next_request{}; // if need to delay the next request processing to have some throtelling + }; + + // functions + using FunctionProcessing = std::function> &, ConfigProcessing &config)>; + using FunctionOnChildrenFinished = std::function> &)>; + using FunctionFinished = std::function> &)>; + // config for an individual job type struct ConfigJobsType { - JobsGroupT m_group{}; // job type group (multiple job types can be configured to same group) - bool m_has_processing_function{false}; // use default processing function - ProcessingFunction m_processing_function{}; // processing Function - // TODO add WaitChildrenFunction - // TODO add FinishedFunction + JobsGroupT m_group{}; // job type group (multiple job types can be configured to same group) + std::optional m_timeout{}; // if need to delay the next request processing to have some throtelling + bool m_has_function_processing{false}; // use default processing function + bool m_has_function_on_children_finished{false}; // use default function for children finished + bool m_has_function_finished{false}; // use default finished function + FunctionProcessing m_function_processing{}; // processing Function for jobs items + FunctionOnChildrenFinished m_function_on_children_finished{}; // function called for a parent when a child is finished + FunctionFinished m_function_finished{}; // function called when jobs items are finished }; - // config for the job group (where job types can be grouped) - struct ConfigJobsGroup + ConfigJobsEngine m_engine{}; // config for entire engine (threads, priorities, etc) + FunctionProcessing m_default_function_processing{}; // default processing function + FunctionOnChildrenFinished m_default_function_on_children_finished{}; // default function to call for a parent when children are finished + FunctionFinished m_default_function_finished{}; // default function to call when jobs items are finished + std::unordered_map m_groups; // config by jobs group + std::unordered_map m_types; // config by jobs type + + // + // add default processing function + // + inline void add_default_function_processing(FunctionProcessing function_processing) { - int m_threads_count{1}; // how many threads for processing 
(out of the global threads) - int m_bulk_count{1}; // how many objects are processed at once - }; + m_default_function_processing = function_processing; + apply_default_function_processing(); + } - ConfigJobsEngine m_engine{}; // config for entire engine (threads, priorities, etc) - ProcessingFunction m_default_processing_function{}; // default processing function - std::unordered_map m_groups; // config by jobs group - std::unordered_map m_types; // config by jobs type + inline void add_default_function_on_children_finished(FunctionOnChildrenFinished function_on_children_finished) + { + m_default_function_on_children_finished = function_on_children_finished; + apply_default_function_on_children_finished(); + } - // default processing function - inline void add_default_processing_function(ProcessingFunction processing_function) + inline void add_default_function_finished(FunctionFinished function_finished) { - m_default_processing_function = processing_function; - apply_default_processing_function(); + m_default_function_finished = function_finished; + apply_default_function_finished(); } - inline void add_job_processing_function(const JobsTypeT &jobs_type, ProcessingFunction processing_function) + // + // add job functions + // + inline void add_job_function_processing(const JobsTypeT &jobs_type, FunctionProcessing function_processing) { auto it_f = m_types.find(jobs_type); if (it_f == m_types.end()) { return; } - it_f->second.m_has_processing_function = true; - it_f->second.m_processing_function = processing_function; + it_f->second.m_has_function_processing = true; + it_f->second.m_function_processing = function_processing; } - // TODO add job WaitChildrenFunction - // TODO add job FinishedFunction - inline void apply_default_processing_function() + inline void add_job_function_on_children_finished(const JobsTypeT &jobs_type, FunctionOnChildrenFinished function_on_children_finished) + { + auto it_f = m_types.find(jobs_type); + if (it_f == m_types.end()) { + return; + } + it_f->second.m_has_function_on_children_finished = true; + it_f->second.m_function_on_children_finished = function_on_children_finished; + } + + inline void add_job_function_finished(const JobsTypeT &jobs_type, FunctionFinished function_finished) + { + auto it_f = m_types.find(jobs_type); + if (it_f == m_types.end()) { + return; + } + it_f->second.m_has_function_finished = true; + it_f->second.m_function_finished = function_finished; + } + + // + // apply default function where it is not set a specific one + // + inline void apply_default_function_processing() + { + for (auto &[type, jobs_type_config] : m_types) { + if (jobs_type_config.m_has_function_processing == false) { + jobs_type_config.m_function_processing = m_default_function_processing; + } + } + } + + inline void apply_default_function_on_children_finished() { for (auto &[type, jobs_type_config] : m_types) { - if (jobs_type_config.m_has_processing_function == false) { - jobs_type_config.m_processing_function = m_default_processing_function; + if (jobs_type_config.m_has_function_on_children_finished == false) { + jobs_type_config.m_function_on_children_finished = m_default_function_on_children_finished; } } } - // TODO apply WaitChildrenFunction - the function must be passed as parameter from engine - // TODO apply FinishedFunction - the function must be passed as parameter from engine + inline void apply_default_function_finished() + { + for (auto &[type, jobs_type_config] : m_types) { + if (jobs_type_config.m_has_function_finished == false) { + 
jobs_type_config.m_function_finished = m_default_function_finished; + } + } + } }; } // namespace small From b4a8feef3db1e7c08c0f2ca9dfcf34107d65c220 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 7 Jan 2025 21:04:50 +0200 Subject: [PATCH 03/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- include/impl/jobs_item_impl.h | 78 ++++++++++++++++++++++------------- include/worker_thread.h | 6 +-- 2 files changed, 52 insertions(+), 32 deletions(-) diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 01e6bed..8d32341 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -10,15 +10,16 @@ #include "../base_lock.h" namespace small::jobsimpl { - // a job can be in the following states + // a job can be in the following states (order is important because it may progress only to higher states) enum class EnumJobsState : unsigned int { kNone = 0, kInProgress, + kWaitChildren, kFinished, + kTimeout, kFailed, kCancelled, - kTimeout }; // a job item @@ -27,44 +28,49 @@ namespace small::jobsimpl { { using JobsID = unsigned long long; - JobsID id{}; // job unique id - JobsTypeT type{}; // job type - std::atomic state{EnumJobsState::kNone}; // job state - std::atomic progress{}; // progress 0-100 for state kInProgress - JobsRequestT request{}; // request needed for processing function - JobsResponseT response{}; // where the results are saved (for the finished callback if exists) - // TODO add parents and children ids + JobsID m_id{}; // job unique id + JobsTypeT m_type{}; // job type + std::atomic m_state{EnumJobsState::kNone}; // job state + std::atomic m_progress{}; // progress 0-100 for state kInProgress + std::vector m_parentIDs{}; // for dependencies relationships parent-child + std::vector m_childrenIDs{}; // for dependencies relationships parent-child + JobsRequestT m_request{}; // request needed for processing function + JobsResponseT m_response{}; // where the results are saved (for the finished callback if exists) explicit jobs_item() = default; explicit jobs_item(const JobsID &jobs_id, const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) - : id(jobs_id), type(jobs_type), request(jobs_request) {} + : m_id(jobs_id), m_type(jobs_type), m_request(jobs_request) {} explicit jobs_item(const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) - : type(jobs_type), request(jobs_request) {} + : m_type(jobs_type), m_request(jobs_request) {} explicit jobs_item(const JobsID &jobs_id, const JobsTypeT &jobs_type, JobsRequestT &&jobs_request) - : id(jobs_id), type(jobs_type), request(std::forward(jobs_request)) {} + : m_id(jobs_id), m_type(jobs_type), m_request(std::forward(jobs_request)) {} explicit jobs_item(const JobsTypeT &jobs_type, JobsRequestT &&jobs_request) - : type(jobs_type), request(std::forward(jobs_request)) {} + : m_type(jobs_type), m_request(std::forward(jobs_request)) {} jobs_item(const jobs_item &other) { operator=(other); }; jobs_item(jobs_item &&other) noexcept { operator=(other); }; jobs_item &operator=(const jobs_item &other) { - id = other.id; - type = other.type; - state = other.state.load(); - progress = other.progress.load(); - request = other.request; - response = other.response; + m_id = other.m_id; + m_type = other.m_type; + m_state = other.m_state.load(); + m_progress = other.m_progress.load(); + m_parentIDs = other.m_parentIDs; + m_childrenIDs = other.m_childrenIDs; + m_request = other.m_request; + m_response = 
other.m_response; return *this; } jobs_item &operator=(jobs_item &&other) noexcept { - id = std::move(other.id); - type = std::move(other.type); - state = other.state.load(); - progress = other.progress.load(); - request = std::move(other.request); - response = std::move(other.response); + m_id = std::move(other.m_id); + m_type = std::move(other.m_type); + m_state = other.m_state.load(); + m_progress = other.m_progress.load(); + m_parentIDs = std::move(other.m_parentIDs); + m_childrenIDs = std::move(other.m_childrenIDs); + m_request = std::move(other.m_request); + m_response = std::move(other.m_response); return *this; } @@ -74,27 +80,41 @@ namespace small::jobsimpl { inline void set_state(const EnumJobsState &new_state) { for (;;) { - EnumJobsState current_state = state.load(); + EnumJobsState current_state = m_state.load(); if (current_state >= new_state) { return; } - if (state.compare_exchange_weak(current_state, new_state)) { + if (m_state.compare_exchange_weak(current_state, new_state)) { return; } } } + // clang-format off + inline void set_state_inprogress () { set_state(EnumJobsState::kInProgress); } + inline void set_state_waitchildren () { set_state(EnumJobsState::kWaitChildren); } + inline void set_state_finished () { set_state(EnumJobsState::kFinished); } + inline void set_state_timeout () { set_state(EnumJobsState::kTimeout); } + inline void set_state_failed () { set_state(EnumJobsState::kFailed); } + inline void set_state_cancelled () { set_state(EnumJobsState::kCancelled); } + + inline bool is_state_inprogress () { return m_state.load() == EnumJobsState::kInProgress; } + inline bool is_state_finished () { return m_state.load() == EnumJobsState::kFinished; } + inline bool is_state_timeout () { return m_state.load() == EnumJobsState::kTimeout; } + // clang-format on + // // set job progress (can only increase) // + inline void set_progress(const int &new_progress) { for (;;) { - int current_progress = progress.load(); + int current_progress = m_progress.load(); if (current_progress >= new_progress) { return; } - if (progress.compare_exchange_weak(current_progress, new_progress)) { + if (m_progress.compare_exchange_weak(current_progress, new_progress)) { return; } } diff --git a/include/worker_thread.h b/include/worker_thread.h index d110e44..057f9b1 100644 --- a/include/worker_thread.h +++ b/include/worker_thread.h @@ -77,7 +77,7 @@ namespace small { template worker_thread(const config_worker_thread config, _Callable function, Args... 
extra_parameters) : m_config(config), - m_processing_function(std::bind(std::forward<_Callable>(function), std::ref(*this), std::placeholders::_1 /*item*/, std::forward(extra_parameters)...)) + m_function_processing(std::bind(std::forward<_Callable>(function), std::ref(*this), std::placeholders::_1 /*item*/, std::forward(extra_parameters)...)) { // auto start threads if count > 0 otherwise threads should be manually started if (config.threads_count) { @@ -310,7 +310,7 @@ namespace small { // callback for queue_items inline void process_items(std::vector &&items) { - m_processing_function(std::forward>(items)); // bind the std::placeholders::_1 + m_function_processing(std::forward>(items)); // bind the std::placeholders::_1 } private: @@ -320,6 +320,6 @@ namespace small { config_worker_thread m_config; // config small::lock_queue_thread> m_queue_items{*this}; // queue of items small::time_queue_thread> m_delayed_items{*this}; // queue of delayed items - std::function &)> m_processing_function{}; // processing Function + std::function &)> m_function_processing{}; // processing Function }; } // namespace small From 164c3d165ea181a1ea98fe4956881b4b8bc3e245 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 7 Jan 2025 21:07:10 +0200 Subject: [PATCH 04/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- include/impl/jobs_queue_impl.h | 69 ++++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index b3007ac..074ef56 100644 --- a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -9,7 +9,7 @@ namespace small::jobsimpl { // - // small queue helper class for jobs (parent caller must implement 'jobs_activate') + // small queue helper class for jobs (parent caller must implement 'jobs_activate', 'jobs_finished') // template class jobs_queue @@ -92,7 +92,7 @@ namespace small::jobsimpl { // config job types // m_types_queues will be initialized in the initial setup phase and will be accessed without locking afterwards // - inline bool add_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group) + inline bool add_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group, const std::optional &jobs_timeout) { auto it_g = m_groups_queues.find(jobs_group); if (it_g == m_groups_queues.end()) { @@ -100,6 +100,9 @@ namespace small::jobsimpl { } m_types_queues[jobs_type] = &it_g->second; + if (jobs_timeout) { + m_types_timeouts[jobs_type] = *jobs_timeout; + } return true; } @@ -123,7 +126,7 @@ namespace small::jobsimpl { *jobs_id = id; } - return jobs_activate(priority, jobs_item->type, id); + return jobs_activate(priority, jobs_item->m_type, id); } inline std::size_t push_back(const JobsPrioT &priority, const std::vector> &jobs_items, std::vector *jobs_ids) @@ -176,7 +179,7 @@ namespace small::jobsimpl { if (jobs_id) { *jobs_id = id; } - return m_delayed_items.queue().push_delay_for(__rtime, {priority, jobs_item->type, id}); + return m_delayed_items.queue().push_delay_for(__rtime, {priority, jobs_item->m_type, id}); } template @@ -197,7 +200,7 @@ namespace small::jobsimpl { if (jobs_id) { *jobs_id = id; } - return m_delayed_items.queue().push_delay_until(__atime, {priority, jobs_item->type, id}); + return m_delayed_items.queue().push_delay_until(__atime, {priority, jobs_item->m_type, id}); } inline std::size_t push_back_delay_until(const std::chrono::time_point 
&__atime, const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) @@ -319,6 +322,26 @@ namespace small::jobsimpl { m_jobs.erase(jobs_id); } + // set the jobs as timeout if it is not finished until now + inline std::vector> jobs_timeout(std::vector &&jobs_ids) + { + std::vector> jobs_items = jobs_get(jobs_ids); + std::vector> timeout_items; + timeout_items.reserve(jobs_items.size()); + + for (auto &jobs_item : jobs_items) { + // set the jobs as timeout if it is not finished until now + if (!jobs_item->state.is_state_finished()) { + jobs_item->set_state_timeout(); + if (jobs_item->is_state_timeout()) { + timeout_items.push_back(jobs_item); + } + } + } + + return timeout_items; + } + private: // some prevention jobs_queue(const jobs_queue &) = delete; @@ -336,10 +359,16 @@ namespace small::jobsimpl { ++m_jobs_seq_id; - JobsID id = m_jobs_seq_id; - jobs_item->id = id; + JobsID id = m_jobs_seq_id; + jobs_item->m_id = id; m_jobs.emplace(id, jobs_item); + // add it to the timeout queue + auto it_timeout = m_types_timeouts.find(jobs_item->m_type); + if (it_timeout != m_types_timeouts.end()) { + m_timeout_queue.queue().push_delay_for(it_timeout->second, id); + } + return id; } @@ -378,18 +407,34 @@ namespace small::jobsimpl { return count; } + // + // inner thread function for timeout items + // + using JobsQueueTimeout = small::time_queue_thread; + friend JobsQueueTimeout; + + inline std::size_t push_back(std::vector &&items) + { + auto jobs_finished = jobs_timeout(items); + m_parent_caller.jobs_finished(jobs_finished); + return items.size(); + } + private: // // members // - mutable small::base_lock m_lock; // global locker - std::atomic m_jobs_seq_id{}; // to get the next jobs id - std::unordered_map> m_jobs; // current jobs - std::unordered_map m_groups_queues; // map of queues by group - std::unordered_map m_types_queues; // optimize to have queues by type (which reference queues by group) + mutable small::base_lock m_lock; // global locker + std::atomic m_jobs_seq_id{}; // to get the next jobs id + std::unordered_map> m_jobs; // current jobs + std::unordered_map m_groups_queues; // map of queues by group + std::unordered_map m_types_queues; // optimize to have queues by type (which reference queues by group) + std::unordered_map m_types_timeouts; // timeouts for types JobQueueDelayedT m_delayed_items{*this}; // queue of delayed items + JobsQueueTimeout m_timeout_queue{*this}; // for timeout elements + ParentCallerT &m_parent_caller; // jobs engine }; } // namespace small::jobsimpl From 4c438dc6c790f814c9120d7ae99500c12602f9e6 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 7 Jan 2025 21:13:52 +0200 Subject: [PATCH 05/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- examples/examples_jobs_engine.h | 91 ++++++++++++++--------- include/jobs_engine.h | 128 +++++++++++++++++++++++++------- 2 files changed, 157 insertions(+), 62 deletions(-) diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index 5620a21..334d513 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -51,66 +51,84 @@ namespace examples::jobs_engine { using Request = std::pair; using JobsEng = small::jobs_engine; - auto jobs_processing_function = [](const std::vector> &items) { + auto jobs_function_processing = [](const std::vector> &items, JobsEng::JobsConfig::ConfigProcessing & /* config */) { // this functions is 
defined without the engine params (it is here just for the example) - std::cout << "this function is defined without the engine params, called for " << (int)items[0]->type << "\n"; + std::cout << "this function is defined without the engine params, called for " << (int)items[0]->m_type << "\n"; }; // // config object for priorities, groups, types, threads, timeouts, sleeps, etc // JobsEng::JobsConfig config{ - .m_engine = {.m_threads_count = 0 /*dont start any thread yet*/, // TODO add thread count for wait_for_children processing and finished, default 1/2 here use 1 due to coalesce - .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, - {small::EnumPriorities::kHigh, 2}, - {small::EnumPriorities::kNormal, 2}, - {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities - - .m_default_processing_function = jobs_processing_function, // default processing function, better use jobs.add_default_processing_function to set it - - .m_groups = {{JobsGroupType::kJobsGroup12, {.m_threads_count = 1}}, // config by jobs group // TODO add sleep_between_requests - {JobsGroupType::kJobsGroup3, {.m_threads_count = 1}}, - {JobsGroupType::kJobsGroupDatabase, {.m_threads_count = 1}}, - {JobsGroupType::kJobsGroupCache, {.m_threads_count = 1}}}, - - .m_types = {{JobsType::kJobsType1, {.m_group = JobsGroupType::kJobsGroup12}}, // - {JobsType::kJobsType2, {.m_group = JobsGroupType::kJobsGroup12}}, // - {JobsType::kJobsType3, {.m_group = JobsGroupType::kJobsGroup3}}, // TODO add timeout for job - {JobsType::kJobsDatabase, {.m_group = JobsGroupType::kJobsGroupDatabase}}, // TODO add timeout for job - {JobsType::kJobsCache, {.m_group = JobsGroupType::kJobsGroupCache}}}}; // TODO add timeout for job + .m_engine = {.m_threads_count = 0, // dont start any thread yet + .m_threads_count_finished = 1, // override how many threads to use for internal processing of finished states + .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, + {small::EnumPriorities::kHigh, 2}, + {small::EnumPriorities::kNormal, 2}, + {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities + + .m_default_function_processing = jobs_function_processing, // default processing function, better use jobs.add_default_function_processing to set it + + .m_groups = {{JobsGroupType::kJobsGroup12, {.m_threads_count = 1}}, // config by jobs group + {JobsGroupType::kJobsGroup3, {.m_threads_count = 1, .m_delay_next_request = std::chrono::milliseconds(30)}}, + {JobsGroupType::kJobsGroupDatabase, {.m_threads_count = 1}}, // these requests will coalesce results for demo purposes + {JobsGroupType::kJobsGroupCache, {.m_threads_count = 0}}}, // no threads !!, these requests are executed outside of jobs engine for demo purposes + + .m_types = {{JobsType::kJobsType1, {.m_group = JobsGroupType::kJobsGroup12}}, + {JobsType::kJobsType2, {.m_group = JobsGroupType::kJobsGroup12}}, + {JobsType::kJobsType3, {.m_group = JobsGroupType::kJobsGroup3, .m_timeout = std::chrono::milliseconds(500)}}, + {JobsType::kJobsDatabase, {.m_group = JobsGroupType::kJobsGroupDatabase}}, + {JobsType::kJobsCache, {.m_group = JobsGroupType::kJobsGroupCache}}}}; // // create jobs engine with the above config // JobsEng jobs(config); - // TODO add config as param so the sleep after can be overridden - jobs.add_default_processing_function([](auto &j /*this jobs engine*/, const auto &jobs_items) { + // create a cache server (with workers to simulate access to it) + // (as an external engine outside the jobs engine for demo 
purposes) + small::worker_thread cache_server({.threads_count = 1}, [](auto &w /*this*/, const auto &items) { + // process item using the workers lock (not recommended) + + for (auto &i : items) { + std::cout << "thread " << std::this_thread::get_id() + << " processing cache {" << i << "}" << "\n"; + + // TODO mark the jobs id associated as succeeded (for demo purposes to avoid creating other structures) + } + // sleep long enough + small::sleep(500); + }); + + // default processing used for job type 3 with custom delay in between requests + // one request will succeed and one request will timeout for demo purposes + jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items, auto &jobs_config) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " DEFAULT processing " << "{" - << " type=" << (int)item->type - << " req.int=" << item->request.first << "," - << " req.str=\"" << item->request.second << "\"" + << " type=" << (int)item->m_type + << " req.int=" << item->m_request.first << "," + << " req.str=\"" << item->m_request.second << "\"" << "}" << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) << "\n"; } - small::sleep(30); + + // set a custom delay (timeout for job3 is 500 ms) + jobs_config.m_delay_next_request = std::chrono::milliseconds(1000); }); - // TODO add config as param so the sleep after can be overridden // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) - jobs.add_job_processing_function(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { + jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " JOB1 processing " << "{" - << " type=" << (int)item->type - << " req.int=" << item->request.first << "," - << " req.str=\"" << item->request.second << "\"" + << " type=" << (int)item->m_type + << " req.int=" << item->m_request.first << "," + << " req.str=\"" << item->m_request.second << "\"" << "}" << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) @@ -123,18 +141,18 @@ namespace examples::jobs_engine { small::sleep(30); }, 5 /*param b*/); // TODO daca as vrea sa folosesc un alt job_server cum modelez asa incat jobul dintr-o parte sa ramana intr-o stare ca si cand ar avea copii si - // TODO sa se face un request in alta parte si ala cand se termina pe finish (sau daca e worker thread in functia de procesare) sa faca set state + // TODO sa se faca un request in alta parte si ala cand se termina pe finish (sau daca e worker thread in functia de procesare) sa faca set state // TODO set state merge daca e doar o dependinta, daca sunt mai multe atunci ar tb o functie custom - childProcessing (desi are sau nu are children - sau cum fac un dummy children - poate cu thread_count 0?) 
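// A small illustration of the point raised in the TODO above (a sketch, not part of the patch):
// because EnumJobsState is ordered and jobs_item::set_state() only ever advances to a higher state,
// an external worker that reports completion cannot overwrite a terminal state such as timeout or
// cancel. Constructing a JobsEng::JobsItem directly here is done only for demonstration and assumes
// the engine is instantiated with Request as the request type.
auto external_item = std::make_shared<JobsEng::JobsItem>(JobsType::kJobsType3, Request{9, "external"});
external_item->set_state_inprogress();
external_item->set_state_timeout();  // e.g. the timeout queue fired before the external reply arrived
external_item->set_state_finished(); // no effect: kFinished is lower than kTimeout, so the state stays kTimeout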
// add specific function for job2 - jobs.add_job_processing_function(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items) { + jobs.add_job_function_processing(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " JOB2 processing " << "{" - << " type=" << (int)item->type - << " req.int=" << item->request.first << "," - << " req.str=\"" << item->request.second << "\"" + << " type=" << (int)item->m_type + << " req.int=" << item->m_request.first << "," + << " req.str=\"" << item->m_request.second << "\"" << "}" << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) @@ -158,6 +176,7 @@ namespace examples::jobs_engine { // show coalesce for children database requests std::unordered_map web_requests; + // TODO type3 one request will succeed and one request will timeout for demo purposes // push jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, {1, "normal"}, &jobs_id); jobs.queue().push_back(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high"}, &jobs_id); diff --git a/include/jobs_engine.h b/include/jobs_engine.h index 93bd940..53581a6 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -33,14 +33,14 @@ // // create jobs engine // JobsEng jobs(config); // -// jobs.add_default_processing_function([](auto &j /*this jobs engine*/, const auto &jobs_items) { +// jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { // for (auto &item : jobs_items) { // ... // } // }); // // // add specific function for job1 -// jobs.add_job_processing_function(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { +// jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { // for (auto &item : jobs_items) { // ... // } @@ -69,14 +69,16 @@ namespace small { class jobs_engine { public: - using ThisJobsEngine = small::jobs_engine; - using JobsConfig = small::jobs_config; - using JobsItem = small::jobsimpl::jobs_item; - using JobsQueue = small::jobsimpl::jobs_queue; - using JobsID = typename JobsItem::JobsID; - using TimeClock = typename JobsQueue::TimeClock; - using TimeDuration = typename JobsQueue::TimeDuration; - using ProcessingFunction = typename JobsConfig::ProcessingFunction; + using ThisJobsEngine = typename small::jobs_engine; + using JobsConfig = typename small::jobs_config; + using JobsItem = typename small::jobsimpl::jobs_item; + using JobsQueue = typename small::jobsimpl::jobs_queue; + using JobsID = typename JobsItem::JobsID; + using TimeClock = typename JobsQueue::TimeClock; + using TimeDuration = typename JobsQueue::TimeDuration; + using FunctionProcessing = typename JobsConfig::FunctionProcessing; + using FunctionOnChildrenFinished = typename JobsConfig::FunctionOnChildrenFinished; + using FunctionFinished = typename JobsConfig::FunctionFinished; public: // @@ -160,17 +162,42 @@ namespace small { apply_config(); } - // processing function + // override default jobs function template - inline void add_default_processing_function(_Callable processing_function, Args... extra_parameters) + inline void add_default_function_processing(_Callable function_processing, Args... 
extra_parameters) { - m_config.add_default_processing_function(std::bind(std::forward<_Callable>(processing_function), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + m_config.add_default_function_processing(std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } template - inline void add_job_processing_function(const JobsTypeT &jobs_type, _Callable processing_function, Args... extra_parameters) + inline void add_default_function_on_children_finished(_Callable function_on_children_finished, Args... extra_parameters) { - m_config.add_job_processing_function(jobs_type, std::bind(std::forward<_Callable>(processing_function), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + m_config.add_default_function_on_children_finished(std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + } + + template + inline void add_default_function_finished(_Callable function_finished, Args... extra_parameters) + { + m_config.add_default_function_finished(std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + } + + // specific jobs functions + template + inline void add_job_function_processing(const JobsTypeT &jobs_type, _Callable function_processing, Args... extra_parameters) + { + m_config.add_job_function_processing(jobs_type, std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + } + + template + inline void add_job_function_on_children_finished(const JobsTypeT &jobs_type, _Callable function_on_children_finished, Args... extra_parameters) + { + m_config.add_job_function_on_children_finished(jobs_type, std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + } + + template + inline void add_job_function_finished(const JobsTypeT &jobs_type, _Callable function_finished, Args... 
extra_parameters) + { + m_config.add_job_function_finished(jobs_type, std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); } // @@ -251,11 +278,19 @@ namespace small { } // setup jobs types - m_config.apply_default_processing_function(); - // TODO apply default WaitChildrenFunction - pass the function as parameter using std::bind(this) - // TODO apply default FinishedFunction - pass the function as parameter using std::bind(this) + if (!m_config.m_default_function_on_children_finished) { + m_config.m_default_function_on_children_finished = std::bind(&jobs_engine::jobs_on_children_finished, this, std::placeholders::_1 /*jobs_items*/); + } + if (!m_config.m_default_function_finished) { + m_config.m_default_function_finished = std::bind(&jobs_engine::jobs_finished, this, std::placeholders::_1 /*jobs_items*/); + } + + m_config.apply_default_function_processing(); + m_config.apply_default_function_on_children_finished(); + m_config.apply_default_function_finished(); + for (auto &[jobs_type, jobs_type_config] : m_config.m_types) { - m_queue.add_jobs_type(jobs_type, jobs_type_config.m_group); + m_queue.add_jobs_type(jobs_type, jobs_type_config.m_group, jobs_type_config.m_timeout); } // auto start threads if count > 0 otherwise threads should be manually started @@ -281,6 +316,10 @@ namespace small { int bulk_count = std::max(it_cfg_grp->second.m_bulk_count, 1); + // delay request + typename JobsConfig::ConfigProcessing group_config{}; + group_config.m_delay_next_request = it_cfg_grp->second.m_delay_next_request; + // get items to process auto *q = m_queue.get_group_queue(jobs_group); if (!q) { @@ -301,9 +340,14 @@ namespace small { // get jobs std::vector> jobs_items = m_queue.jobs_get(vec_ids); for (auto &jobs_item : jobs_items) { - elems_by_type[jobs_item->type].reserve(jobs_items.size()); - elems_by_type[jobs_item->type].push_back(jobs_item); - // TODO mark the items as in progress + elems_by_type[jobs_item->m_type].reserve(jobs_items.size()); + + // mark the item as in progress + jobs_item->set_state_inprogress(); + // execute if it is still in progress (may be moved to higher states due to external factors like cancel, timeout, finish early due to other job, etc) + if (jobs_item->is_state_inprogress()) { + elems_by_type[jobs_item->m_type].push_back(jobs_item); + } } } @@ -314,11 +358,26 @@ namespace small { continue; } - // TODO pass the config parameter + // TODO if an item has timeout add it to a time queue with callback that marks that item as timeout + // TODO timeout should be set only if it is not finished/failed/cancelled + // process specific jobs by type - it_cfg_type->second.m_processing_function(jobs); + typename JobsConfig::ConfigProcessing type_config; + it_cfg_type->second.m_function_processing(jobs, type_config); + + // get the max for config + if (!group_config.m_delay_next_request) { + group_config.m_delay_next_request = type_config.m_delay_next_request; + } else { + if (type_config.m_delay_next_request) { + group_config.m_delay_next_request = std::max(group_config.m_delay_next_request, type_config.m_delay_next_request); + } + } // TODO marks the items as either wait for children (if it has children) or finished + // mark the item as in wait for children of finished + // if in callback the state is set to failed, cancelled or timeout setting to finish wont succeed because if less value than those + // jobs_item->set_state(small::EnumJobsState::kInProgress); // TODO put in proper 
thread for processing children and finished work (1/2 thread(s) for each - better to have a config for it?) // TODO the worker thread is configured for jobgroup, children and finished are not part of those - a solution is to add a pair or internal_group } @@ -328,8 +387,9 @@ namespace small { m_queue.jobs_del(jobs_id); } - // TODO for sleep after requests use worker_thread delay item -> check if has_items should be set properly - // TODO if sleep is set set has_items to true to force the sleep, but also a last time sleep so if there too much time and are no items dont continue + // TODO group_config.m_delay_next_request + // TODO for delay after requests use worker_thread delay item -> check if has_items should be set properly + // TODO if delay is set set has_items to true to force the sleep, but also a last time sleep so if there too much time and are no items dont continue return ret; } @@ -346,6 +406,22 @@ namespace small { m_thread_pool.job_start(m_config.m_types[jobs_type].m_group); } + inline void jobs_finished(const std::vector> &jobs_items) + { + // TODO call the custom function from config if exists + + for (auto &jobs_item : jobs_items) { + m_queue.jobs_del(jobs_item->id); + } + } + + inline void jobs_on_children_finished(const std::vector> &jobs_children) + { + // TODO update parent state and progress + // for (auto &jobs_child : jobs_children) { + // } + } + private: // // members From 86a8600c1059ed953a6fc91f29709ad9e3b5c9ce Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 7 Jan 2025 21:20:15 +0200 Subject: [PATCH 06/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- examples/examples_jobs_engine.h | 11 +++++------ include/jobs_config.h | 3 +-- include/jobs_engine.h | 10 +++------- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index 334d513..10a7e19 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -60,12 +60,11 @@ namespace examples::jobs_engine { // config object for priorities, groups, types, threads, timeouts, sleeps, etc // JobsEng::JobsConfig config{ - .m_engine = {.m_threads_count = 0, // dont start any thread yet - .m_threads_count_finished = 1, // override how many threads to use for internal processing of finished states - .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, - {small::EnumPriorities::kHigh, 2}, - {small::EnumPriorities::kNormal, 2}, - {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities + .m_engine = {.m_threads_count = 0, // dont start any thread yet + .m_config_prio = {.priorities = {{small::EnumPriorities::kHighest, 2}, + {small::EnumPriorities::kHigh, 2}, + {small::EnumPriorities::kNormal, 2}, + {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities .m_default_function_processing = jobs_function_processing, // default processing function, better use jobs.add_default_function_processing to set it diff --git a/include/jobs_config.h b/include/jobs_config.h index a0f1770..ebef9bd 100644 --- a/include/jobs_config.h +++ b/include/jobs_config.h @@ -22,8 +22,7 @@ namespace small { // config for the entire jobs engine struct ConfigJobsEngine { - int m_threads_count{8}; // how many total threads for processing - int m_threads_count_finished{2}; // how many threads (out of total m_threads_count) to use for processing finished states + int m_threads_count{8}; // how many 
total threads for processing small::config_prio_queue m_config_prio{}; }; diff --git a/include/jobs_engine.h b/include/jobs_engine.h index 53581a6..a9f6917 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -281,9 +281,6 @@ namespace small { if (!m_config.m_default_function_on_children_finished) { m_config.m_default_function_on_children_finished = std::bind(&jobs_engine::jobs_on_children_finished, this, std::placeholders::_1 /*jobs_items*/); } - if (!m_config.m_default_function_finished) { - m_config.m_default_function_finished = std::bind(&jobs_engine::jobs_finished, this, std::placeholders::_1 /*jobs_items*/); - } m_config.apply_default_function_processing(); m_config.apply_default_function_on_children_finished(); @@ -358,9 +355,6 @@ namespace small { continue; } - // TODO if an item has timeout add it to a time queue with callback that marks that item as timeout - // TODO timeout should be set only if it is not finished/failed/cancelled - // process specific jobs by type typename JobsConfig::ConfigProcessing type_config; it_cfg_type->second.m_function_processing(jobs, type_config); @@ -395,6 +389,7 @@ namespace small { } // TODO external set state for a job moves it to proper wait for children or finished + // TODO add functions jobs_cancel, jobs_finish(response), jobs_failed(response) // // inner function for activate the jobs from queue @@ -409,9 +404,10 @@ namespace small { inline void jobs_finished(const std::vector> &jobs_items) { // TODO call the custom function from config if exists + // (this may be called from multiple places - queue timeout, do_action finished, above set state cancel, finish, ) for (auto &jobs_item : jobs_items) { - m_queue.jobs_del(jobs_item->id); + m_queue.jobs_del(jobs_item->m_id); } } From 2c671e612aec55c2786fcb6a6783cb64d5d170c0 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Fri, 10 Jan 2025 21:55:54 +0200 Subject: [PATCH 07/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- README.md | 4 +- examples/examples_jobs_engine.h | 8 +-- include/impl/jobs_engine_thread_pool_impl.h | 2 +- include/impl/jobs_queue_impl.h | 77 ++++++++++----------- include/jobs_config.h | 12 ++-- include/jobs_engine.h | 70 ++++++++++++++----- 6 files changed, 100 insertions(+), 73 deletions(-) diff --git a/README.md b/README.md index 622f49b..0eeadfe 100644 --- a/README.md +++ b/README.md @@ -427,7 +427,7 @@ JobsEng::JobsConfig config{ // create jobs engine JobsEng jobs(config); ... -jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { +jobs.config_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { for (auto &item : jobs_items) { ... } @@ -435,7 +435,7 @@ jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto }); ... // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) -jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { +jobs.config_jobs_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { for (auto &item : jobs_items) { ... 
} diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index 10a7e19..f0f0a05 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -66,7 +66,7 @@ namespace examples::jobs_engine { {small::EnumPriorities::kNormal, 2}, {small::EnumPriorities::kLow, 1}}}}, // overall config with default priorities - .m_default_function_processing = jobs_function_processing, // default processing function, better use jobs.add_default_function_processing to set it + .m_default_function_processing = jobs_function_processing, // default processing function, better use jobs.config_default_function_processing to set it .m_groups = {{JobsGroupType::kJobsGroup12, {.m_threads_count = 1}}, // config by jobs group {JobsGroupType::kJobsGroup3, {.m_threads_count = 1, .m_delay_next_request = std::chrono::milliseconds(30)}}, @@ -101,7 +101,7 @@ namespace examples::jobs_engine { // default processing used for job type 3 with custom delay in between requests // one request will succeed and one request will timeout for demo purposes - jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items, auto &jobs_config) { + jobs.config_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items, auto &jobs_config) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " DEFAULT processing " @@ -120,7 +120,7 @@ namespace examples::jobs_engine { }); // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) - jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { + jobs.config_jobs_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " JOB1 processing " @@ -144,7 +144,7 @@ namespace examples::jobs_engine { // TODO set state merge daca e doar o dependinta, daca sunt mai multe atunci ar tb o functie custom - childProcessing (desi are sau nu are children - sau cum fac un dummy children - poate cu thread_count 0?) 
// add specific function for job2 - jobs.add_job_function_processing(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { + jobs.config_jobs_function_processing(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() << " JOB2 processing " diff --git a/include/impl/jobs_engine_thread_pool_impl.h b/include/impl/jobs_engine_thread_pool_impl.h index 28d941e..bcb9743 100644 --- a/include/impl/jobs_engine_thread_pool_impl.h +++ b/include/impl/jobs_engine_thread_pool_impl.h @@ -54,7 +54,7 @@ namespace small::jobsimpl { // config processing by job group type // this should be done in the initial setup phase once // - inline void add_job_group(const JobGroupT &job_group, const int &threads_count) + inline void config_jobs_group(const JobGroupT &job_group, const int &threads_count) { m_scheduler[job_group].m_threads_count = threads_count; } diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index 074ef56..8107bec 100644 --- a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -83,7 +83,7 @@ namespace small::jobsimpl { // config groups // m_groups_queues will be initialized in the initial setup phase and will be accessed without locking afterwards // - inline void add_jobs_group(const JobsGroupT &job_group, const small::config_prio_queue &config_prio) + inline void config_jobs_group(const JobsGroupT &job_group, const small::config_prio_queue &config_prio) { m_groups_queues[job_group] = JobsQueue{config_prio}; } @@ -92,7 +92,7 @@ namespace small::jobsimpl { // config job types // m_types_queues will be initialized in the initial setup phase and will be accessed without locking afterwards // - inline bool add_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group, const std::optional &jobs_timeout) + inline bool config_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group, const std::optional &jobs_timeout) { auto it_g = m_groups_queues.find(jobs_group); if (it_g == m_groups_queues.end()) { @@ -161,6 +161,8 @@ namespace small::jobsimpl { return push_back(priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); } + // TODO add push_back_child() + // no emplace_back do to returning the jobs_id // @@ -208,6 +210,8 @@ namespace small::jobsimpl { return push_back_delay_until(__atime, priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); } + // TODO add push_back_child_....() + // clang-format off // // signal exit @@ -291,6 +295,7 @@ namespace small::jobsimpl { // // get group queue + // called from parent jobs engine // inline JobsQueue *get_group_queue(const JobsGroupT &jobs_group) { @@ -298,6 +303,10 @@ namespace small::jobsimpl { return it != m_groups_queues.end() ? 
&it->second : nullptr; } + // + // get job items + // called from parent jobs engine + // inline std::vector> jobs_get(const std::vector &jobs_ids) { std::vector> jobs_items; @@ -316,42 +325,8 @@ namespace small::jobsimpl { return jobs_items; // will be moved } - inline void jobs_del(const JobsID &jobs_id) - { - std::unique_lock l(m_lock); - m_jobs.erase(jobs_id); - } - - // set the jobs as timeout if it is not finished until now - inline std::vector> jobs_timeout(std::vector &&jobs_ids) - { - std::vector> jobs_items = jobs_get(jobs_ids); - std::vector> timeout_items; - timeout_items.reserve(jobs_items.size()); - - for (auto &jobs_item : jobs_items) { - // set the jobs as timeout if it is not finished until now - if (!jobs_item->state.is_state_finished()) { - jobs_item->set_state_timeout(); - if (jobs_item->is_state_timeout()) { - timeout_items.push_back(jobs_item); - } - } - } - - return timeout_items; - } - - private: - // some prevention - jobs_queue(const jobs_queue &) = delete; - jobs_queue(jobs_queue &&) = delete; - jobs_queue &operator=(const jobs_queue &) = delete; - jobs_queue &operator=(jobs_queue &&__t) = delete; - - private: // - // add job items + // add jobs item // inline JobsID jobs_add(std::shared_ptr jobs_item) { @@ -372,7 +347,9 @@ namespace small::jobsimpl { return id; } + // // activate the jobs + // inline std::size_t jobs_activate(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsID &jobs_id) { std::size_t ret = 0; @@ -393,8 +370,26 @@ namespace small::jobsimpl { return ret; } + // + // delete jobs item + // + inline void jobs_del(const JobsID &jobs_id) + { + std::unique_lock l(m_lock); + m_jobs.erase(jobs_id); + } + + private: + // some prevention + jobs_queue(const jobs_queue &) = delete; + jobs_queue(jobs_queue &&) = delete; + jobs_queue &operator=(const jobs_queue &) = delete; + jobs_queue &operator=(jobs_queue &&__t) = delete; + + private: // // inner thread function for delayed items + // called from m_delayed_items // friend JobQueueDelayedT; @@ -409,15 +404,15 @@ namespace small::jobsimpl { // // inner thread function for timeout items + // called from m_timeout_queue // using JobsQueueTimeout = small::time_queue_thread; friend JobsQueueTimeout; - inline std::size_t push_back(std::vector &&items) + inline std::size_t push_back(std::vector &&jobs_ids) { - auto jobs_finished = jobs_timeout(items); - m_parent_caller.jobs_finished(jobs_finished); - return items.size(); + m_parent_caller.jobs_timeout(jobs_ids); + return jobs_ids.size(); } private: diff --git a/include/jobs_config.h b/include/jobs_config.h index ebef9bd..45cfb64 100644 --- a/include/jobs_config.h +++ b/include/jobs_config.h @@ -68,19 +68,19 @@ namespace small { // // add default processing function // - inline void add_default_function_processing(FunctionProcessing function_processing) + inline void config_default_function_processing(FunctionProcessing function_processing) { m_default_function_processing = function_processing; apply_default_function_processing(); } - inline void add_default_function_on_children_finished(FunctionOnChildrenFinished function_on_children_finished) + inline void config_default_function_on_children_finished(FunctionOnChildrenFinished function_on_children_finished) { m_default_function_on_children_finished = function_on_children_finished; apply_default_function_on_children_finished(); } - inline void add_default_function_finished(FunctionFinished function_finished) + inline void config_default_function_finished(FunctionFinished function_finished) { 
m_default_function_finished = function_finished; apply_default_function_finished(); @@ -89,7 +89,7 @@ namespace small { // // add job functions // - inline void add_job_function_processing(const JobsTypeT &jobs_type, FunctionProcessing function_processing) + inline void config_jobs_function_processing(const JobsTypeT &jobs_type, FunctionProcessing function_processing) { auto it_f = m_types.find(jobs_type); if (it_f == m_types.end()) { @@ -99,7 +99,7 @@ namespace small { it_f->second.m_function_processing = function_processing; } - inline void add_job_function_on_children_finished(const JobsTypeT &jobs_type, FunctionOnChildrenFinished function_on_children_finished) + inline void config_jobs_function_on_children_finished(const JobsTypeT &jobs_type, FunctionOnChildrenFinished function_on_children_finished) { auto it_f = m_types.find(jobs_type); if (it_f == m_types.end()) { @@ -109,7 +109,7 @@ namespace small { it_f->second.m_function_on_children_finished = function_on_children_finished; } - inline void add_job_function_finished(const JobsTypeT &jobs_type, FunctionFinished function_finished) + inline void config_jobs_function_finished(const JobsTypeT &jobs_type, FunctionFinished function_finished) { auto it_f = m_types.find(jobs_type); if (it_f == m_types.end()) { diff --git a/include/jobs_engine.h b/include/jobs_engine.h index a9f6917..a1a1ddc 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -33,14 +33,14 @@ // // create jobs engine // JobsEng jobs(config); // -// jobs.add_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { +// jobs.config_default_function_processing([](auto &j /*this jobs engine*/, const auto &jobs_items) { // for (auto &item : jobs_items) { // ... // } // }); // // // add specific function for job1 -// jobs.add_job_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { +// jobs.config_jobs_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto b /*extra param b*/) { // for (auto &item : jobs_items) { // ... // } @@ -164,40 +164,40 @@ namespace small { // override default jobs function template - inline void add_default_function_processing(_Callable function_processing, Args... extra_parameters) + inline void config_default_function_processing(_Callable function_processing, Args... extra_parameters) { - m_config.add_default_function_processing(std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + m_config.config_default_function_processing(std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } template - inline void add_default_function_on_children_finished(_Callable function_on_children_finished, Args... extra_parameters) + inline void config_default_function_on_children_finished(_Callable function_on_children_finished, Args... 
extra_parameters) { - m_config.add_default_function_on_children_finished(std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + m_config.config_default_function_on_children_finished(std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } template - inline void add_default_function_finished(_Callable function_finished, Args... extra_parameters) + inline void config_default_function_finished(_Callable function_finished, Args... extra_parameters) { - m_config.add_default_function_finished(std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + m_config.config_default_function_finished(std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } // specific jobs functions template - inline void add_job_function_processing(const JobsTypeT &jobs_type, _Callable function_processing, Args... extra_parameters) + inline void config_jobs_function_processing(const JobsTypeT &jobs_type, _Callable function_processing, Args... extra_parameters) { - m_config.add_job_function_processing(jobs_type, std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + m_config.config_jobs_function_processing(jobs_type, std::bind(std::forward<_Callable>(function_processing), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } template - inline void add_job_function_on_children_finished(const JobsTypeT &jobs_type, _Callable function_on_children_finished, Args... extra_parameters) + inline void config_jobs_function_on_children_finished(const JobsTypeT &jobs_type, _Callable function_on_children_finished, Args... extra_parameters) { - m_config.add_job_function_on_children_finished(jobs_type, std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + m_config.config_jobs_function_on_children_finished(jobs_type, std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); } template - inline void add_job_function_finished(const JobsTypeT &jobs_type, _Callable function_finished, Args... extra_parameters) + inline void config_jobs_function_finished(const JobsTypeT &jobs_type, _Callable function_finished, Args... 
extra_parameters) { - m_config.add_job_function_finished(jobs_type, std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + m_config.config_jobs_function_finished(jobs_type, std::bind(std::forward<_Callable>(function_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); } // @@ -273,8 +273,8 @@ namespace small { { // setup jobs groups for (auto &[jobs_group, jobs_group_config] : m_config.m_groups) { - m_queue.add_jobs_group(jobs_group, m_config.m_engine.m_config_prio); - m_thread_pool.add_job_group(jobs_group, jobs_group_config.m_threads_count); + m_queue.config_jobs_group(jobs_group, m_config.m_engine.m_config_prio); + m_thread_pool.config_jobs_group(jobs_group, jobs_group_config.m_threads_count); } // setup jobs types @@ -287,7 +287,7 @@ namespace small { m_config.apply_default_function_finished(); for (auto &[jobs_type, jobs_type_config] : m_config.m_types) { - m_queue.add_jobs_type(jobs_type, jobs_type_config.m_group, jobs_type_config.m_timeout); + m_queue.config_jobs_type(jobs_type, jobs_type_config.m_group, jobs_type_config.m_timeout); } // auto start threads if count > 0 otherwise threads should be manually started @@ -372,8 +372,6 @@ namespace small { // mark the item as in wait for children of finished // if in callback the state is set to failed, cancelled or timeout setting to finish wont succeed because if less value than those // jobs_item->set_state(small::EnumJobsState::kInProgress); - // TODO put in proper thread for processing children and finished work (1/2 thread(s) for each - better to have a config for it?) - // TODO the worker thread is configured for jobgroup, children and finished are not part of those - a solution is to add a pair or internal_group } // TODO move this delete on the finished thread @@ -393,6 +391,7 @@ namespace small { // // inner function for activate the jobs from queue + // called from queue // friend JobsQueue; @@ -401,16 +400,49 @@ namespace small { m_thread_pool.job_start(m_config.m_types[jobs_type].m_group); } + // + // set the jobs as timeout if it is not finished until now + // called from queue + // + inline std::vector> jobs_timeout(const std::vector &jobs_ids) + { + std::vector> jobs_items = m_queue.jobs_get(jobs_ids); + std::vector> timeout_items; + timeout_items.reserve(jobs_items.size()); + + for (auto &jobs_item : jobs_items) { + // set the jobs as timeout if it is not finished until now + if (jobs_item->state.is_state_finished()) { + continue; + } + + jobs_item->set_state_timeout(); + if (jobs_item->is_state_timeout()) { + timeout_items.push_back(jobs_item); + } + } + jobs_finished(timeout_items); + } + + // + // finish a job + // inline void jobs_finished(const std::vector> &jobs_items) { // TODO call the custom function from config if exists // (this may be called from multiple places - queue timeout, do_action finished, above set state cancel, finish, ) + // TODO delete only if there are no parents (delete all the finished children now) for (auto &jobs_item : jobs_items) { m_queue.jobs_del(jobs_item->m_id); } + + // TODO if it has parents call jobs_on_children_finished } + // + // after child is finished + // inline void jobs_on_children_finished(const std::vector> &jobs_children) { // TODO update parent state and progress From d24a8bd7f18eb2aa810a70954ab0287a2c7641ea Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Fri, 10 Jan 2025 23:58:46 +0200 Subject: [PATCH 08/12] Add multiple 
features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- examples/examples_jobs_engine.h | 22 +- include/impl/jobs_engine_thread_pool_impl.h | 12 +- include/impl/jobs_item_impl.h | 6 +- include/impl/jobs_queue_impl.h | 334 +++++++++++++++++--- include/jobs_engine.h | 4 +- 5 files changed, 314 insertions(+), 64 deletions(-) diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index f0f0a05..f0078fe 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -177,26 +177,26 @@ namespace examples::jobs_engine { // TODO type3 one request will succeed and one request will timeout for demo purposes // push - jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, {1, "normal"}, &jobs_id); - jobs.queue().push_back(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, {1, "normal"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high"}, &jobs_id); - jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, std::make_pair(3, "normal"), &jobs_id); - jobs.queue().push_back(small::EnumPriorities::kHigh, JobsType::kJobsType1, {4, "high"}, &jobs_id); - jobs.queue().push_back(small::EnumPriorities::kLow, JobsType::kJobsType1, {5, "low"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, std::make_pair(3, "normal"), &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType1, {4, "high"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kLow, JobsType::kJobsType1, {5, "low"}, &jobs_id); Request req = {6, "normal"}; - jobs.queue().push_back(small::EnumPriorities::kNormal, JobsType::kJobsType1, req, nullptr); + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, req, nullptr); std::vector> jobs_items = { std::make_shared(JobsType::kJobsType1, Request{7, "highest"}), std::make_shared(JobsType::kJobsType1, Request{8, "highest"}), }; - jobs.queue().push_back(small::EnumPriorities::kHighest, jobs_items, &jobs_ids); - jobs.queue().push_back(small::EnumPriorities::kHighest, {std::make_shared(JobsType::kJobsType1, Request{9, "highest"})}, &jobs_ids); + jobs.queue().push_back_and_start(small::EnumPriorities::kHighest, jobs_items, &jobs_ids); + jobs.queue().push_back_and_start(small::EnumPriorities::kHighest, {std::make_shared(JobsType::kJobsType1, Request{9, "highest"})}, &jobs_ids); - jobs.queue().push_back_delay_for(std::chrono::milliseconds(300), small::EnumPriorities::kNormal, JobsType::kJobsType1, {100, "delay normal"}, &jobs_id); - jobs.queue().push_back_delay_until(small::timeNow() + std::chrono::milliseconds(350), small::EnumPriorities::kNormal, JobsType::kJobsType1, {101, "delay normal"}, &jobs_id); - jobs.queue().push_back_delay_for(std::chrono::milliseconds(400), small::EnumPriorities::kNormal, JobsType::kJobsType1, {102, "delay normal"}, &jobs_id); + jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(300), small::EnumPriorities::kNormal, JobsType::kJobsType1, {100, "delay normal"}, &jobs_id); + jobs.queue().push_back_and_start_delay_until(small::timeNow() + std::chrono::milliseconds(350), small::EnumPriorities::kNormal, JobsType::kJobsType1, {101, "delay normal"}, &jobs_id); + 
jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(400), small::EnumPriorities::kNormal, JobsType::kJobsType1, {102, "delay normal"}, &jobs_id); jobs.start_threads(3); // manual start threads diff --git a/include/impl/jobs_engine_thread_pool_impl.h b/include/impl/jobs_engine_thread_pool_impl.h index bcb9743..60972d1 100644 --- a/include/impl/jobs_engine_thread_pool_impl.h +++ b/include/impl/jobs_engine_thread_pool_impl.h @@ -63,7 +63,7 @@ namespace small::jobsimpl { // when items are added to be processed in parent class the start scheduler should be called // to trigger action (if needed for the new job group) // - inline void job_start(const JobGroupT &job_group) + inline void jobs_start(const JobGroupT &job_group) { auto it = m_scheduler.find(job_group); // map is not changed, so can be access without locking if (it == m_scheduler.end()) { @@ -73,7 +73,7 @@ namespace small::jobsimpl { // even if here it is considered that there are items and something will be scheduled, // the actual check if work will still exists will be done in do_action of parent auto &stats = it->second; - job_action_start(job_group, true, stats); + jobs_action_start(job_group, true, stats); } // clang-format off @@ -129,7 +129,7 @@ namespace small::jobsimpl { // // to trigger action (if needed for the new job group) // - inline void job_action_start(const JobGroupT &job_group, const bool has_items, JobGroupStats &stats) + inline void jobs_action_start(const JobGroupT &job_group, const bool has_items, JobGroupStats &stats) { if (!has_items) { return; @@ -148,7 +148,7 @@ namespace small::jobsimpl { // // job action ended // - inline void job_action_end(const JobGroupT &job_group, const bool has_items) + inline void jobs_action_end(const JobGroupT &job_group, const bool has_items) { auto it = m_scheduler.find(job_group); // map is not changed, so can be access without locking if (it == m_scheduler.end()) { @@ -160,7 +160,7 @@ namespace small::jobsimpl { auto &stats = it->second; --stats.m_running; - job_action_start(job_group, has_items, stats); + jobs_action_start(job_group, has_items, stats); } // @@ -174,7 +174,7 @@ namespace small::jobsimpl { m_parent_caller.do_action(job_group, &has_items); // start another action - job_action_end(job_group, has_items); + jobs_action_end(job_group, has_items); } } diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 8d32341..98eaa89 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -38,12 +38,14 @@ namespace small::jobsimpl { JobsResponseT m_response{}; // where the results are saved (for the finished callback if exists) explicit jobs_item() = default; + explicit jobs_item(const JobsID &jobs_id, const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) : m_id(jobs_id), m_type(jobs_type), m_request(jobs_request) {} - explicit jobs_item(const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) - : m_type(jobs_type), m_request(jobs_request) {} explicit jobs_item(const JobsID &jobs_id, const JobsTypeT &jobs_type, JobsRequestT &&jobs_request) : m_id(jobs_id), m_type(jobs_type), m_request(std::forward(jobs_request)) {} + + explicit jobs_item(const JobsTypeT &jobs_type, const JobsRequestT &jobs_request) + : m_type(jobs_type), m_request(jobs_request) {} explicit jobs_item(const JobsTypeT &jobs_type, JobsRequestT &&jobs_request) : m_type(jobs_type), m_request(std::forward(jobs_request)) {} diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index 8107bec..b59889f 100644 --- 
a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -9,7 +9,7 @@ namespace small::jobsimpl { // - // small queue helper class for jobs (parent caller must implement 'jobs_activate', 'jobs_finished') + // small queue helper class for jobs (parent caller must implement 'jobs_start', 'jobs_finished') // template class jobs_queue @@ -108,33 +108,36 @@ namespace small::jobsimpl { // // add items to be processed - // push_back + // push_back only add the jobs item but does not start it // - inline std::size_t push_back(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &job_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back(const JobsTypeT &jobs_type, const JobsRequestT &job_req, JobsID *jobs_id) { - return push_back(priority, std::make_shared(jobs_type, job_req), jobs_id); + // this job should be manually started by calling jobs_start + return push_back(std::make_shared(jobs_type, job_req), jobs_id); } - inline std::size_t push_back(const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) + inline std::size_t push_back(std::shared_ptr jobs_item, JobsID *jobs_id) { if (is_exit()) { return 0; } + // this job should be manually started by calling jobs_start auto id = jobs_add(jobs_item); if (jobs_id) { *jobs_id = id; } - return jobs_activate(priority, jobs_item->m_type, id); + return 1; } - inline std::size_t push_back(const JobsPrioT &priority, const std::vector> &jobs_items, std::vector *jobs_ids) + inline std::size_t push_back(const std::vector> &jobs_items, std::vector *jobs_ids) { if (is_exit()) { return 0; } + // this jobs should be manually started by calling jobs_start std::unique_lock l(m_lock); std::size_t count = 0; @@ -144,7 +147,7 @@ namespace small::jobsimpl { } JobsID jobs_id{}; for (auto &jobs_item : jobs_items) { - auto ret = push_back(priority, jobs_item, &jobs_id); + auto ret = push_back(jobs_item, &jobs_id); if (ret) { if (jobs_ids) { jobs_ids->push_back(jobs_id); @@ -156,12 +159,257 @@ namespace small::jobsimpl { } // push_back move semantics - inline std::size_t push_back(const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back(const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id) { - return push_back(priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); + // this job should be manually started by calling jobs_start + return push_back(std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); } - // TODO add push_back_child() + // + // push back and start the job + // + inline std::size_t push_back_and_start(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &job_req, JobsID *jobs_id = nullptr) + { + return push_back_and_start(priority, std::make_shared(jobs_type, job_req), jobs_id); + } + + inline std::size_t push_back_and_start(const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) + { + std::unique_lock l(m_lock); + + JobsID id{}; + auto ret = push_back(jobs_item, &id); + if (!ret) { + return ret; + } + + if (jobs_id) { + *jobs_id = id; + } + + // start the job + return jobs_start(priority, jobs_item->m_type, id); + } + + inline std::size_t push_back_and_start(const JobsPrioT &priority, const std::vector> &jobs_items, std::vector *jobs_ids = nullptr) + { + if (is_exit()) { + return 0; + } + + std::unique_lock l(m_lock); + + std::vector ids; + + auto ret = push_back(jobs_items, &ids); + if (!ret) { + return ret; + } + + // start 
the jobs + jobs_start(priority, ids); + + if (jobs_ids) { + *jobs_ids = std::move(ids); + } + + return ret; + } + + // push_back move semantics + inline std::size_t push_back_and_start(const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) + { + return push_back_and_start(priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); + } + + // + // helper push_back a new job child and link with the parent + // + inline std::size_t push_back_child(const JobsID &parent_jobs_id, const JobsTypeT &child_jobs_type, const JobsRequestT &child_job_req, JobsID *child_jobs_id) + { + // this job should be manually started by calling jobs_start + return push_back_child(parent_jobs_id, std::make_shared(child_jobs_type, child_job_req), child_jobs_id); + } + + inline std::size_t push_back_child(const JobsID &parent_jobs_id, std::shared_ptr child_jobs_item, JobsID *child_jobs_id) + { + if (is_exit()) { + return 0; + } + + std::unique_lock l(m_lock); + + auto *parent_jobs_item = jobs_get(parent_jobs_id); + if (!parent_jobs_item) { + return 0; + } + + // this job should be manually started by calling jobs_start + JobsID id{}; + auto ret = push_back(child_jobs_item, &id); + if (!ret) { + return ret; + } + + if (child_jobs_id) { + *child_jobs_id = id; + } + + jobs_parent_child(*parent_jobs_item, child_jobs_item); + return 1; + } + + inline std::size_t push_back_child(const JobsID &parent_jobs_id, const std::vector> &children_jobs_items, std::vector *children_jobs_ids) + { + if (is_exit()) { + return 0; + } + + // this job should be manually started by calling jobs_start + std::unique_lock l(m_lock); + + std::size_t count = 0; + if (children_jobs_ids) { + children_jobs_ids->reserve(children_jobs_items.size()); + children_jobs_ids->clear(); + } + JobsID child_jobs_id{}; + for (auto &child_jobs_item : children_jobs_items) { + auto ret = push_back_child(parent_jobs_id, child_jobs_item, &child_jobs_id); + if (ret) { + if (children_jobs_ids) { + children_jobs_ids->push_back(child_jobs_id); + } + } + count += ret; + } + return count; + } + + // push_back_child move semantics + inline std::size_t push_back_child(const JobsID &parent_jobs_id, const JobsTypeT &child_jobs_type, JobsRequestT &&child_jobs_req, JobsID *child_jobs_id) + { + // this job should be manually started by calling jobs_start + return push_back_child(parent_jobs_id, std::make_shared(child_jobs_type, std::forward(child_jobs_req)), child_jobs_id); + } + + // + // helper push_back a new job child and link with the parent and start the child job + // + inline std::size_t push_back_and_start_child(const JobsID &parent_jobs_id, const JobsPrioT &child_priority, const JobsTypeT &child_jobs_type, const JobsRequestT &child_job_req, JobsID *child_jobs_id = nullptr) + { + return push_back_and_start_child(parent_jobs_id, child_priority, std::make_shared(child_jobs_type, child_job_req), child_jobs_id); + } + + inline std::size_t push_back_and_start_child(const JobsID &parent_jobs_id, const JobsPrioT &child_priority, std::shared_ptr child_jobs_item, JobsID *child_jobs_id = nullptr) + { + if (is_exit()) { + return 0; + } + + std::unique_lock l(m_lock); + + JobsID id{}; + auto ret = push_back_child(parent_jobs_id, child_jobs_item, &id); + if (!ret) { + return ret; + } + + if (child_jobs_id) { + *child_jobs_id = id; + } + + // start the job + return jobs_start(child_priority, child_jobs_item->m_type, id); + } + + inline std::size_t push_back_and_start_child(const JobsID &parent_jobs_id, const JobsPrioT 
&children_priority, const std::vector> &children_jobs_items, std::vector *children_jobs_ids = nullptr) + { + if (is_exit()) { + return 0; + } + + std::unique_lock l(m_lock); + + std::vector ids; + + auto ret = push_back_child(parent_jobs_id, children_jobs_items, &ids); + if (!ret) { + return ret; + } + + // start the jobs + jobs_start(children_priority, ids); + + if (children_jobs_ids) { + *children_jobs_ids = std::move(ids); + } + + return ret; + } + + // push_back move semantics + inline std::size_t push_back_and_start_child(const JobsID &parent_jobs_id, const JobsPrioT &child_priority, const JobsTypeT &child_jobs_type, JobsRequestT &&child_jobs_req, JobsID *child_jobs_id = nullptr) + { + return push_back_and_start_child(parent_jobs_id, child_priority, std::make_shared(child_jobs_type, std::forward(child_jobs_req)), child_jobs_id); + } + + // + // set relationship parent-child + // + inline std::size_t jobs_parent_child(const JobsID &parent_jobs_id, const JobsID &child_jobs_id) + { + std::unique_lock l(m_lock); + + auto *parent_jobs_item = jobs_get(parent_jobs_id); + if (!parent_jobs_item) { + return 0; + } + auto *child_jobs_item = jobs_get(child_jobs_id); + if (!child_jobs_item) { + return 0; + } + + jobs_parent_child(*parent_jobs_item, *child_jobs_item); + return 1; + } + + // + // start the jobs + // + inline std::size_t jobs_start(const JobsPrioT &priority, const std::vector &jobs_ids) + { + std::size_t count = 0; + auto jobs_items = jobs_get(jobs_ids); + for (auto &jobs_item : jobs_items) { + auto ret = jobs_start(priority, jobs_item->m_type, jobs_item->m_id); + if (ret) { + ++count; + } + } + return count; + } + + inline std::size_t jobs_start(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsID &jobs_id) + { + std::size_t ret = 0; + + // optimization to get the queue from the type + // (instead of getting the group from type from m_config.m_types and then getting the queue from the m_groups_queues) + auto it_q = m_types_queues.find(jobs_type); + if (it_q != m_types_queues.end()) { + auto *q = it_q->second; + ret = q->push_back(priority, jobs_id); + } + + if (ret) { + m_parent_caller.jobs_start(jobs_type, jobs_id); + } else { + // TODO maybe call m_parent.jobs_cancel(jobs_id)? 
+ jobs_del(jobs_id); + } + return ret; + } // no emplace_back do to returning the jobs_id @@ -169,13 +417,13 @@ namespace small::jobsimpl { // push_back with specific timeings // template - inline std::size_t push_back_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &jobs_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &jobs_req, JobsID *jobs_id = nullptr) { - return push_back_delay_for(__rtime, priority, std::make_shared(jobs_type, jobs_req), jobs_id); + return push_back_and_start_delay_for(__rtime, priority, std::make_shared(jobs_type, jobs_req), jobs_id); } template - inline std::size_t push_back_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) { auto id = jobs_add(jobs_item); if (jobs_id) { @@ -185,18 +433,18 @@ namespace small::jobsimpl { } template - inline std::size_t push_back_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) { - return push_back_delay_for(__rtime, priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); + return push_back_and_start_delay_for(__rtime, priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); } // avoid time_casting from one clock to another // template // - inline std::size_t push_back_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &jobs_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsRequestT &jobs_req, JobsID *jobs_id = nullptr) { - return push_back_delay_until(__atime, priority, std::make_shared(jobs_type, jobs_req), jobs_id); + return push_back_and_start_delay_until(__atime, priority, std::make_shared(jobs_type, jobs_req), jobs_id); } - inline std::size_t push_back_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) { auto id = jobs_add(jobs_item); if (jobs_id) { @@ -205,9 +453,9 @@ namespace small::jobsimpl { return m_delayed_items.queue().push_delay_until(__atime, {priority, jobs_item->m_type, id}); } - inline std::size_t push_back_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) + inline std::size_t push_back_and_start_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, const JobsTypeT &jobs_type, JobsRequestT &&jobs_req, JobsID *jobs_id = nullptr) { - return 
push_back_delay_until(__atime, priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); + return push_back_and_start_delay_until(__atime, priority, std::make_shared(jobs_type, std::forward(jobs_req)), jobs_id); } // TODO add push_back_child_....() @@ -325,6 +573,18 @@ namespace small::jobsimpl { return jobs_items; // will be moved } + // internal jobs_get + inline std::shared_ptr *jobs_get(const JobsID &jobs_id) + { + std::unique_lock l(m_lock); + + auto it_j = m_jobs.find(jobs_id); + if (it_j == m_jobs.end()) { + return nullptr; + } + return &it_j->second; + } + // // add jobs item // @@ -348,35 +608,23 @@ namespace small::jobsimpl { } // - // activate the jobs + // delete jobs item // - inline std::size_t jobs_activate(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsID &jobs_id) + inline void jobs_del(const JobsID &jobs_id) { - std::size_t ret = 0; - - // optimization to get the queue from the type - // (instead of getting the group from type from m_config.m_types and then getting the queue from the m_groups_queues) - auto it_q = m_types_queues.find(jobs_type); - if (it_q != m_types_queues.end()) { - auto *q = it_q->second; - ret = q->push_back(priority, jobs_id); - } - - if (ret) { - m_parent_caller.jobs_activate(jobs_type, jobs_id); - } else { - jobs_del(jobs_id); - } - return ret; + std::unique_lock l(m_lock); + m_jobs.erase(jobs_id); } // - // delete jobs item + // set relationship parent-child // - inline void jobs_del(const JobsID &jobs_id) + inline void jobs_parent_child(std::shared_ptr parent_jobs_item, std::shared_ptr child_jobs_item) { std::unique_lock l(m_lock); - m_jobs.erase(jobs_id); + + parent_jobs_item->m_childrenIDs.push_back(child_jobs_item->m_id); + child_jobs_item->m_parentIDs.push_back(parent_jobs_item->m_id); } private: @@ -397,7 +645,7 @@ namespace small::jobsimpl { { std::size_t count = 0; for (auto &[priority, jobs_type, jobs_id] : items) { - count += jobs_activate(priority, jobs_type, jobs_id); + count += jobs_start(priority, jobs_type, jobs_id); } return count; } diff --git a/include/jobs_engine.h b/include/jobs_engine.h index a1a1ddc..d66e2a1 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -395,9 +395,9 @@ namespace small { // friend JobsQueue; - inline void jobs_activate(const JobsTypeT &jobs_type, const JobsID & /* jobs_id */) + inline void jobs_start(const JobsTypeT &jobs_type, const JobsID & /* jobs_id */) { - m_thread_pool.job_start(m_config.m_types[jobs_type].m_group); + m_thread_pool.jobs_start(m_config.m_types[jobs_type].m_group); } // From e6a342de9e3e661ea74f99c3ba9909a6b5aab711 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Sun, 12 Jan 2025 01:13:45 +0200 Subject: [PATCH 09/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- include/impl/jobs_engine_thread_pool_impl.h | 2 +- include/impl/jobs_item_impl.h | 11 +- include/impl/jobs_queue_impl.h | 179 +++++++++++------- include/jobs_engine.h | 200 ++++++++++++++++---- 4 files changed, 283 insertions(+), 109 deletions(-) diff --git a/include/impl/jobs_engine_thread_pool_impl.h b/include/impl/jobs_engine_thread_pool_impl.h index 60972d1..cd46a5f 100644 --- a/include/impl/jobs_engine_thread_pool_impl.h +++ b/include/impl/jobs_engine_thread_pool_impl.h @@ -63,7 +63,7 @@ namespace small::jobsimpl { // when items are added to be processed in parent class the start scheduler should be called // to trigger action (if needed for the new job group) // - inline 
void jobs_start(const JobGroupT &job_group) + inline void jobs_schedule(const JobGroupT &job_group) { auto it = m_scheduler.find(job_group); // map is not changed, so can be access without locking if (it == m_scheduler.end()) { diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 98eaa89..7d83312 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -100,9 +100,14 @@ namespace small::jobsimpl { inline void set_state_failed () { set_state(EnumJobsState::kFailed); } inline void set_state_cancelled () { set_state(EnumJobsState::kCancelled); } - inline bool is_state_inprogress () { return m_state.load() == EnumJobsState::kInProgress; } - inline bool is_state_finished () { return m_state.load() == EnumJobsState::kFinished; } - inline bool is_state_timeout () { return m_state.load() == EnumJobsState::kTimeout; } + inline bool is_state (const EnumJobsState &state) { return m_state.load() == state; } + + inline bool is_state_inprogress () { return is_state(EnumJobsState::kInProgress); } + inline void is_state_waitchildren () { return is_state(EnumJobsState::kWaitChildren); } + inline bool is_state_finished () { return is_state(EnumJobsState::kFinished); } + inline bool is_state_timeout () { return is_state(EnumJobsState::kTimeout); } + inline void is_state_failed () { return is_state(EnumJobsState::kFailed); } + inline void is_state_cancelled () { return is_state(EnumJobsState::kCancelled); } // clang-format on // diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index b59889f..ec5ca74 100644 --- a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -9,7 +9,7 @@ namespace small::jobsimpl { // - // small queue helper class for jobs (parent caller must implement 'jobs_start', 'jobs_finished') + // small queue helper class for jobs (parent caller must implement 'jobs_schedule', 'jobs_finished') // template class jobs_queue @@ -27,7 +27,9 @@ namespace small::jobsimpl { using TimeClock = typename small::time_queue::TimeClock; using TimeDuration = typename small::time_queue::TimeDuration; - public: + private: + friend ParentCallerT; + // // jobs_queue // @@ -106,6 +108,10 @@ namespace small::jobsimpl { return true; } + // + // only this part is public + // + public: // // add items to be processed // push_back only add the jobs item but does not start it @@ -354,63 +360,6 @@ namespace small::jobsimpl { return push_back_and_start_child(parent_jobs_id, child_priority, std::make_shared(child_jobs_type, std::forward(child_jobs_req)), child_jobs_id); } - // - // set relationship parent-child - // - inline std::size_t jobs_parent_child(const JobsID &parent_jobs_id, const JobsID &child_jobs_id) - { - std::unique_lock l(m_lock); - - auto *parent_jobs_item = jobs_get(parent_jobs_id); - if (!parent_jobs_item) { - return 0; - } - auto *child_jobs_item = jobs_get(child_jobs_id); - if (!child_jobs_item) { - return 0; - } - - jobs_parent_child(*parent_jobs_item, *child_jobs_item); - return 1; - } - - // - // start the jobs - // - inline std::size_t jobs_start(const JobsPrioT &priority, const std::vector &jobs_ids) - { - std::size_t count = 0; - auto jobs_items = jobs_get(jobs_ids); - for (auto &jobs_item : jobs_items) { - auto ret = jobs_start(priority, jobs_item->m_type, jobs_item->m_id); - if (ret) { - ++count; - } - } - return count; - } - - inline std::size_t jobs_start(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsID &jobs_id) - { - std::size_t ret = 0; - - // optimization to get the queue 
from the type - // (instead of getting the group from type from m_config.m_types and then getting the queue from the m_groups_queues) - auto it_q = m_types_queues.find(jobs_type); - if (it_q != m_types_queues.end()) { - auto *q = it_q->second; - ret = q->push_back(priority, jobs_id); - } - - if (ret) { - m_parent_caller.jobs_start(jobs_type, jobs_id); - } else { - // TODO maybe call m_parent.jobs_cancel(jobs_id)? - jobs_del(jobs_id); - } - return ret; - } - // no emplace_back do to returning the jobs_id // @@ -425,6 +374,10 @@ namespace small::jobsimpl { template inline std::size_t push_back_and_start_delay_for(const std::chrono::duration<_Rep, _Period> &__rtime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) { + if (is_exit()) { + return 0; + } + auto id = jobs_add(jobs_item); if (jobs_id) { *jobs_id = id; @@ -446,6 +399,10 @@ namespace small::jobsimpl { inline std::size_t push_back_and_start_delay_until(const std::chrono::time_point &__atime, const JobsPrioT &priority, std::shared_ptr jobs_item, JobsID *jobs_id = nullptr) { + if (is_exit()) { + return 0; + } + auto id = jobs_add(jobs_item); if (jobs_id) { *jobs_id = id; @@ -460,6 +417,7 @@ namespace small::jobsimpl { // TODO add push_back_child_....() + private: // clang-format off // // signal exit @@ -539,21 +497,28 @@ namespace small::jobsimpl { } private: - friend ParentCallerT; - // - // get group queue - // called from parent jobs engine + // get jobs group queue // - inline JobsQueue *get_group_queue(const JobsGroupT &jobs_group) + inline JobsQueue *get_jobs_group_queue(const JobsGroupT &jobs_group) { auto it = m_groups_queues.find(jobs_group); return it != m_groups_queues.end() ? &it->second : nullptr; } + // + // get jobs type queue + // + inline JobsQueue *get_jobs_type_queue(const JobsTypeT &jobs_type) + { + // optimization to get the queue from the type + // (instead of getting the group from type from m_config.m_types and then getting the queue from the m_groups_queues) + auto it = m_types_queues.find(jobs_type); + return it != m_types_queues.end() ? 
it->second : nullptr; + } + // // get job items - // called from parent jobs engine // inline std::vector> jobs_get(const std::vector &jobs_ids) { @@ -573,7 +538,6 @@ namespace small::jobsimpl { return jobs_items; // will be moved } - // internal jobs_get inline std::shared_ptr *jobs_get(const JobsID &jobs_id) { std::unique_lock l(m_lock); @@ -599,18 +563,72 @@ namespace small::jobsimpl { m_jobs.emplace(id, jobs_item); // add it to the timeout queue - auto it_timeout = m_types_timeouts.find(jobs_item->m_type); - if (it_timeout != m_types_timeouts.end()) { - m_timeout_queue.queue().push_delay_for(it_timeout->second, id); + jobs_start_timeout(jobs_item); + return id; + } + + // + // start the jobs + // + inline std::size_t jobs_start(const JobsPrioT &priority, const std::vector &jobs_ids) + { + std::size_t count = 0; + auto jobs_items = jobs_get(jobs_ids); + for (auto &jobs_item : jobs_items) { + auto ret = jobs_start(priority, jobs_item->m_type, jobs_item->m_id); + if (ret) { + ++count; + } } + return count; + } - return id; + inline std::size_t jobs_start(const JobsPrioT &priority, const JobsID &jobs_id) + { + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return 0; + } + return jobs_start(priority, (*jobs_item)->m_type, (*jobs_item)->m_id); + } + + inline std::size_t jobs_start(const JobsPrioT &priority, const JobsTypeT &jobs_type, const JobsID &jobs_id) + { + std::size_t ret = 0; + + auto *q = get_jobs_type_queue(jobs_type); + if (q) { + ret = q->push_back(priority, jobs_id); + } + + if (ret) { + m_parent_caller.jobs_schedule(jobs_type, jobs_id); + } else { + // TODO maybe call m_parent.jobs_failed(jobs_id)? + jobs_erase(jobs_id); + } + return ret; + } + + // + // add it to the timeout queue + // + inline std::size_t jobs_start_timeout(std::shared_ptr jobs_item) + { + std::unique_lock l(m_lock); + + // only if job type has config a timeout + auto it_timeout = m_types_timeouts.find(jobs_item->m_type); + if (it_timeout == m_types_timeouts.end()) { + return 0; + } + return m_timeout_queue.queue().push_delay_for(it_timeout->second, jobs_item->m_id); } // - // delete jobs item + // erase jobs item // - inline void jobs_del(const JobsID &jobs_id) + inline void jobs_erase(const JobsID &jobs_id) { std::unique_lock l(m_lock); m_jobs.erase(jobs_id); @@ -619,6 +637,23 @@ namespace small::jobsimpl { // // set relationship parent-child // + inline std::size_t jobs_parent_child(const JobsID &parent_jobs_id, const JobsID &child_jobs_id) + { + std::unique_lock l(m_lock); + + auto *parent_jobs_item = jobs_get(parent_jobs_id); + if (!parent_jobs_item) { + return 0; + } + auto *child_jobs_item = jobs_get(child_jobs_id); + if (!child_jobs_item) { + return 0; + } + + jobs_parent_child(*parent_jobs_item, *child_jobs_item); + return 1; + } + inline void jobs_parent_child(std::shared_ptr parent_jobs_item, std::shared_ptr child_jobs_item) { std::unique_lock l(m_lock); diff --git a/include/jobs_engine.h b/include/jobs_engine.h index d66e2a1..7517c87 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -201,10 +201,88 @@ namespace small { } // - // queue access + // jobs functions + // + + // + // add items to jobs queue // inline JobsQueue &queue() { return m_queue; } + // + // start schedule jobs items + // + inline std::size_t jobs_start(const JobsPrioT &priority, const JobsID &jobs_id) + { + return queue().jobs_start(priority, jobs_id); + } + + inline std::size_t jobs_start(const JobsPrioT &priority, const std::vector &jobs_ids) + { + return queue().jobs_start(priority, jobs_ids); + } 
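Because push_back now only enqueues and jobs_start does the actual scheduling, a caller can assemble a small parent/child graph before anything is executed. A minimal usage sketch built only from the calls shown above (the request values are made up):

JobsEng::JobsID parent_id{};
jobs.queue().push_back(JobsType::kJobsType1, {42, "parent"}, &parent_id);

JobsEng::JobsID child_id{};
jobs.queue().push_back_child(parent_id /*parent*/, JobsType::kJobsDatabase, {42, "child"}, &child_id);

// nothing runs until the jobs are started explicitly
jobs.jobs_start(small::EnumPriorities::kNormal, parent_id);
jobs.jobs_start(small::EnumPriorities::kHigh, child_id);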
+ + // + // get job items + // + inline std::shared_ptr *jobs_get(const JobsID &jobs_id) + { + return queue().jobs_get(jobs_id); + } + + inline std::vector> jobs_get(const std::vector &jobs_ids) + { + return queue().jobs_get(jobs_ids); + } + + // + // set relationship parent-child + // + inline std::size_t jobs_parent_child(const JobsID &parent_jobs_id, const JobsID &child_jobs_id) + { + return queue().jobs_parent_child(parent_jobs_id, child_jobs_id); + } + + inline void jobs_parent_child(std::shared_ptr parent_jobs_item, std::shared_ptr child_jobs_item) + { + return queue().jobs_parent_child(parent_jobs_item, child_jobs_item); + } + + // + // set jobs state + // + inline void jobs_progress(const JobsID &jobs_id, const int &progress) + { + jobs_set_progress(jobs_id, progress); + } + + inline void jobs_finished(const JobsID &jobs_id) + { + jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFinished); + } + inline void jobs_finished(const std::vector &jobs_ids) + { + jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFinished); + } + + inline void jobs_failed(const JobsID &jobs_id) + { + jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFailed); + } + inline void jobs_failed(const std::vector &jobs_ids) + { + jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFailed); + } + + inline void jobs_cancelled(const JobsID &jobs_id) + { + jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kCancelled); + } + inline void jobs_cancelled(const std::vector &jobs_ids) + { + jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kCancelled); + } + // clang-format off // // signal exit @@ -297,7 +375,7 @@ namespace small { } // - // inner thread function for executing items (should return if there are more items) + // inner thread function for executing items (should return if there are more items) (called from thread_pool) // friend small::jobsimpl::jobs_engine_thread_pool; @@ -318,7 +396,7 @@ namespace small { group_config.m_delay_next_request = it_cfg_grp->second.m_delay_next_request; // get items to process - auto *q = m_queue.get_group_queue(jobs_group); + auto *q = m_queue.get_jobs_group_queue(jobs_group); if (!q) { return small::EnumLock::kExit; } @@ -368,15 +446,9 @@ namespace small { } } - // TODO marks the items as either wait for children (if it has children) or finished // mark the item as in wait for children of finished // if in callback the state is set to failed, cancelled or timeout setting to finish wont succeed because if less value than those - // jobs_item->set_state(small::EnumJobsState::kInProgress); - } - - // TODO move this delete on the finished thread - for (auto &jobs_id : vec_ids) { - m_queue.jobs_del(jobs_id); + jobs_waitforchildren(jobs); } // TODO group_config.m_delay_next_request @@ -386,55 +458,117 @@ namespace small { return ret; } - // TODO external set state for a job moves it to proper wait for children or finished - // TODO add functions jobs_cancel, jobs_finish(response), jobs_failed(response) - // - // inner function for activate the jobs from queue - // called from queue + // inner function for activate the jobs from queue (called from queue) // friend JobsQueue; - inline void jobs_start(const JobsTypeT &jobs_type, const JobsID & /* jobs_id */) + inline void jobs_schedule(const JobsTypeT &jobs_type, const JobsID & /* jobs_id */) + { + m_thread_pool.jobs_schedule(m_config.m_types[jobs_type].m_group); + } + + // + // jobs states + // + inline void jobs_set_progress(const JobsID &jobs_id, const int &progress) + { + auto *jobs_item = 
jobs_get(jobs_id); + if (!jobs_item) { + return; + } + + (*jobs_item)->set_progress(progress); + + if (progress == 100) { + jobs_finished(jobs_id); + } + } + + inline void jobs_waitforchildren(const std::vector> &jobs_items) + { + // set the jobs as waitforchildren only if there are children otherwise advance to finish + jobs_set_state(jobs_items, small::jobsimpl::EnumJobsState::kTimeout); + } + + inline void jobs_timeout(const std::vector &jobs_ids) { - m_thread_pool.jobs_start(m_config.m_types[jobs_type].m_group); + // set the jobs as timeout if it is not finished until now (called from queue) + jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kTimeout); } // - // set the jobs as timeout if it is not finished until now - // called from queue + // apply state // - inline std::vector> jobs_timeout(const std::vector &jobs_ids) + inline void jobs_set_state(const JobsID &jobs_id, const small::jobsimpl::EnumJobsState &jobs_state) + { + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return; + } + + auto ret = jobs_set_state(*jobs_item, jobs_state); + if (ret) { + jobs_completed({*jobs_item}); + } + } + + inline void jobs_set_state(const std::vector &jobs_ids, const small::jobsimpl::EnumJobsState &jobs_state) + { + auto jobs_items = jobs_get(jobs_ids); + jobs_set_state(jobs_items, jobs_state); + } + + inline void jobs_set_state(const std::vector> &jobs_items, const small::jobsimpl::EnumJobsState &jobs_state) { - std::vector> jobs_items = m_queue.jobs_get(jobs_ids); - std::vector> timeout_items; - timeout_items.reserve(jobs_items.size()); + std::vector> changed_items; + changed_items.reserve(jobs_items.size()); for (auto &jobs_item : jobs_items) { - // set the jobs as timeout if it is not finished until now - if (jobs_item->state.is_state_finished()) { - continue; + auto ret = jobs_set_state(jobs_item, jobs_state); + if (ret) { + changed_items.push_back(jobs_item); } + } + + jobs_completed(changed_items); + } + + inline std::size_t jobs_set_state(std::shared_ptr jobs_item, small::jobsimpl::EnumJobsState jobs_state) + { + // state is already the same + if (jobs_item->is_state(jobs_state)) { + return 0; + } - jobs_item->set_state_timeout(); - if (jobs_item->is_state_timeout()) { - timeout_items.push_back(jobs_item); + // set the jobs as timeout if it is not finished until now + if (jobs_state == small::jobsimpl::EnumJobsState::kTimeout && jobs_item->is_state_finished()) { + return 0; + } + + // set the jobs as waitforchildren only if there are children otherwise advance to finish + if (jobs_state == small::jobsimpl::EnumJobsState::kWaitChildren) { + std::unique_lock l(*this); + if (jobs_item->m_childrenIDs.size() == 0) { + jobs_state = small::jobsimpl::EnumJobsState::kFinished; } } - jobs_finished(timeout_items); + + jobs_item->set_state(jobs_state); + return jobs_item->is_state(jobs_state) ? 
1 : 0; } // - // finish a job + // when a job is completed (finished/timeout/canceled/failed) // - inline void jobs_finished(const std::vector> &jobs_items) + inline void jobs_completed(const std::vector> &jobs_items) { // TODO call the custom function from config if exists // (this may be called from multiple places - queue timeout, do_action finished, above set state cancel, finish, ) // TODO delete only if there are no parents (delete all the finished children now) for (auto &jobs_item : jobs_items) { - m_queue.jobs_del(jobs_item->m_id); + m_queue.jobs_erase(jobs_item->m_id); } // TODO if it has parents call jobs_on_children_finished From d3993dcbc7c8e9a780ca7057be6cf8a893c44191 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Mon, 13 Jan 2025 23:36:30 +0200 Subject: [PATCH 10/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- examples/examples_jobs_engine.h | 105 ++++++++++++------ include/jobs_engine.h | 186 ++++++++++++++++++++++++-------- 2 files changed, 213 insertions(+), 78 deletions(-) diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index f0078fe..0f648e2 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -33,6 +33,7 @@ namespace examples::jobs_engine { // enum class JobsType { + kJobsNone = 0, kJobsType1, kJobsType2, kJobsType3, @@ -48,8 +49,9 @@ namespace examples::jobs_engine { kJobsGroupCache, }; - using Request = std::pair; - using JobsEng = small::jobs_engine; + using Request = std::pair; + using Response = int; + using JobsEng = small::jobs_engine; auto jobs_function_processing = [](const std::vector> &items, JobsEng::JobsConfig::ConfigProcessing & /* config */) { // this functions is defined without the engine params (it is here just for the example) @@ -86,16 +88,17 @@ namespace examples::jobs_engine { // create a cache server (with workers to simulate access to it) // (as an external engine outside the jobs engine for demo purposes) - small::worker_thread cache_server({.threads_count = 1}, [](auto &w /*this*/, const auto &items) { - // process item using the workers lock (not recommended) - + small::worker_thread cache_server({.threads_count = 1}, [&](auto &w /*this*/, const auto &items) { for (auto &i : items) { std::cout << "thread " << std::this_thread::get_id() - << " processing cache {" << i << "}" << "\n"; + << " CACHE processing" + << " {" << i << "}" << "\n"; - // TODO mark the jobs id associated as succeeded (for demo purposes to avoid creating other structures) + // mark the jobs id associated as succeeded (for demo purposes to avoid creating other structures) + jobs.jobs_finished(i, (Response)i); } // sleep long enough + // no coalesce for demo purposes (sleep 500) so 3rd parent items is finished due to database and not cache server small::sleep(500); }); @@ -120,10 +123,10 @@ namespace examples::jobs_engine { }); // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) - jobs.config_jobs_function_processing(JobsType::kJobsType1, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { + jobs.config_jobs_function_processing(JobsType::kJobsType1, [&](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() - << " JOB1 processing " + << " JOB1 processing 
" << "{" << " type=" << (int)item->m_type << " req.int=" << item->m_request.first << "," @@ -132,22 +135,41 @@ namespace examples::jobs_engine { << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) << "\n"; - // TODO add 2 more children jobs for current one for database and server cache - // TODO save somewhere in an unordered_map the database requests - the problem is that jobid is received after push_jobs - // TODO save type1 requests into a promises unordered_map - // TODO for type 2 only database (add another processing function) + + // add 2 more children jobs for current one for database and server cache + JobsEng::JobsID jobs_child_db_id{}; + JobsEng::JobsID jobs_child_cache_id{}; + + auto ret = j.queue().push_back_child(item->m_id /*parent*/, JobsType::kJobsDatabase, item->m_request, &jobs_child_db_id); + if (!ret) { + j.jobs_failed(item->m_id); + } + ret = j.queue().push_back_child(item->m_id /*parent*/, JobsType::kJobsCache, item->m_request, &jobs_child_cache_id); + if (!ret) { + j.jobs_failed(jobs_child_db_id); + j.jobs_failed(item->m_id); + } + + j.jobs_start(small::EnumPriorities::kNormal, jobs_child_db_id); + // jobs_child_cache_id has no threads to execute, it has external executors + cache_server.push_back(jobs_child_cache_id); } small::sleep(30); }, 5 /*param b*/); + // TODO save type1 requests into a promises unordered_map and complete on finishing the job + // TODO add custom finish function for jobtype1 to complete the promises + + // TODO save somewhere in an unordered_map the database requests (passes as request params for job type1) // TODO daca as vrea sa folosesc un alt job_server cum modelez asa incat jobul dintr-o parte sa ramana intr-o stare ca si cand ar avea copii si // TODO sa se faca un request in alta parte si ala cand se termina pe finish (sau daca e worker thread in functia de procesare) sa faca set state // TODO set state merge daca e doar o dependinta, daca sunt mai multe atunci ar tb o functie custom - childProcessing (desi are sau nu are children - sau cum fac un dummy children - poate cu thread_count 0?) 
// add specific function for job2 jobs.config_jobs_function_processing(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { + bool first_job = true; for (auto &item : jobs_items) { std::cout << "thread " << std::this_thread::get_id() - << " JOB2 processing " + << " JOB2 processing " << "{" << " type=" << (int)item->m_type << " req.int=" << item->m_request.first << "," @@ -156,7 +178,21 @@ namespace examples::jobs_engine { << " ref count " << item.use_count() << " time " << small::toISOString(small::timeNow()) << "\n"; - // TODO for type 2 only database children (add another processing function) + + if (first_job) { + // for type 2 only database children (for demo purposes no result will be used from database) + auto ret = j.queue().push_back_and_start_child(item->m_id /*parent*/, + small::EnumPriorities::kNormal, + JobsType::kJobsDatabase, + item->m_request); + if (!ret) { + j.jobs_failed(item->m_id); + } + } else { + j.jobs_failed(item->m_id); + } + + first_job = false; } // TODO config to wait after request (even if it is not specified in the global config - so custom throttle) small::sleep(30); }); @@ -170,35 +206,36 @@ namespace examples::jobs_engine { JobsEng::JobsID jobs_id{}; std::vector jobs_ids; - // TODO create a promises/futures unordered_map for type1 requests and wait later + // type3 one request will succeed and one request will timeout for demo purposes + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType3, {3, "normal3"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType3, {3, "high3"}, &jobs_id); + + // type2 only the first request succeeds and waits for child the other fails from the start + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType2, {2, "normal2"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high2"}, &jobs_id); // show coalesce for children database requests std::unordered_map web_requests; - // TODO type3 one request will succeed and one request will timeout for demo purposes - // push - jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, {1, "normal"}, &jobs_id); - jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType2, {2, "high"}, &jobs_id); - - jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, std::make_pair(3, "normal"), &jobs_id); - jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType1, {4, "high"}, &jobs_id); - jobs.queue().push_back_and_start(small::EnumPriorities::kLow, JobsType::kJobsType1, {5, "low"}, &jobs_id); - - Request req = {6, "normal"}; - jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, req, nullptr); + // TODO create a promises/futures unordered_map for type1 requests and wait later + // push with multiple variants + jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType1, {11, "normal11"}, &jobs_id); std::vector> jobs_items = { - std::make_shared(JobsType::kJobsType1, Request{7, "highest"}), - std::make_shared(JobsType::kJobsType1, Request{8, "highest"}), + std::make_shared(JobsType::kJobsType1, Request{12, "highest12"}), }; jobs.queue().push_back_and_start(small::EnumPriorities::kHighest, jobs_items, &jobs_ids); - jobs.queue().push_back_and_start(small::EnumPriorities::kHighest, {std::make_shared(JobsType::kJobsType1, 
Request{9, "highest"})}, &jobs_ids); - jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(300), small::EnumPriorities::kNormal, JobsType::kJobsType1, {100, "delay normal"}, &jobs_id); - jobs.queue().push_back_and_start_delay_until(small::timeNow() + std::chrono::milliseconds(350), small::EnumPriorities::kNormal, JobsType::kJobsType1, {101, "delay normal"}, &jobs_id); - jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(400), small::EnumPriorities::kNormal, JobsType::kJobsType1, {102, "delay normal"}, &jobs_id); + jobs.queue().push_back_and_start(small::EnumPriorities::kLow, JobsType::kJobsType1, {13, "low13"}, nullptr); + + Request req = {14, "normal14"}; + jobs.queue().push_back(JobsType::kJobsType1, req, &jobs_id); + jobs.jobs_start(small::EnumPriorities::kNormal, jobs_id); + + jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(300), small::EnumPriorities::kNormal, JobsType::kJobsType1, {115, "delay normal115"}, &jobs_id); - jobs.start_threads(3); // manual start threads + // manual start threads + jobs.start_threads(3); small::sleep(50); // jobs.signal_exit_force(); diff --git a/include/jobs_engine.h b/include/jobs_engine.h index 7517c87..f24fae4 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -108,7 +108,7 @@ namespace small { // clear inline void clear() { - std::unique_lock l(m_queue); + std::unique_lock l(*this); m_queue.clear(); m_thread_pool.clear(); } @@ -251,36 +251,69 @@ namespace small { // // set jobs state // - inline void jobs_progress(const JobsID &jobs_id, const int &progress) + inline bool jobs_progress(const JobsID &jobs_id, const int &progress) { - jobs_set_progress(jobs_id, progress); + return jobs_set_progress(jobs_id, progress); } - inline void jobs_finished(const JobsID &jobs_id) + inline bool jobs_response(const JobsID &jobs_id, const JobsResponseT &jobs_response) { - jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFinished); + return jobs_set_response(jobs_id, jobs_response); } - inline void jobs_finished(const std::vector &jobs_ids) + inline bool jobs_response(const JobsID &jobs_id, JobsResponseT &&jobs_response) { - jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFinished); + return jobs_set_response(jobs_id, jobs_response); } - inline void jobs_failed(const JobsID &jobs_id) + inline bool jobs_finished(const JobsID &jobs_id) { - jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFailed); + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFinished); } - inline void jobs_failed(const std::vector &jobs_ids) + inline bool jobs_finished(const std::vector &jobs_ids) { - jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFailed); + return jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFinished); + } + inline bool jobs_finished(const JobsID &jobs_id, const JobsResponseT &jobs_response) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFinished, jobs_response); + } + inline bool jobs_finished(const JobsID &jobs_id, JobsResponseT &&jobs_response) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFinished, std::forward(jobs_response)); } - inline void jobs_cancelled(const JobsID &jobs_id) + inline bool jobs_failed(const JobsID &jobs_id) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFailed); + } + inline bool jobs_failed(const std::vector &jobs_ids) + { + return jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kFailed); + } + inline bool jobs_failed(const JobsID &jobs_id, 
const JobsResponseT &jobs_response) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFailed, jobs_response); + } + inline bool jobs_failed(const JobsID &jobs_id, JobsResponseT &&jobs_response) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kFailed, std::forward(jobs_response)); + } + + inline bool jobs_cancelled(const JobsID &jobs_id) + { + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kCancelled); + } + inline bool jobs_cancelled(const std::vector &jobs_ids) + { + return jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kCancelled); + } + inline bool jobs_cancelled(const JobsID &jobs_id, const JobsResponseT &jobs_response) { - jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kCancelled); + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kCancelled, jobs_response); } - inline void jobs_cancelled(const std::vector &jobs_ids) + inline bool jobs_cancelled(const JobsID &jobs_id, JobsResponseT &&jobs_response) { - jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kCancelled); + return jobs_set_state(jobs_id, small::jobsimpl::EnumJobsState::kCancelled, std::forward(jobs_response)); } // clang-format off @@ -469,81 +502,146 @@ namespace small { } // - // jobs states + // jobs area transform from id to jobs_item // - inline void jobs_set_progress(const JobsID &jobs_id, const int &progress) + inline bool jobs_set_progress(const JobsID &jobs_id, const int &progress) { auto *jobs_item = jobs_get(jobs_id); if (!jobs_item) { - return; + return false; } + return jobs_set_progress(*jobs_item, progress); + } - (*jobs_item)->set_progress(progress); + inline bool jobs_set_response(const JobsID &jobs_id, const JobsResponseT &jobs_response) + { + std::unique_lock l(*this); + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return false; + } + return jobs_set_response(*jobs_item, jobs_response); + } - if (progress == 100) { - jobs_finished(jobs_id); + inline bool jobs_set_response(const JobsID &jobs_id, JobsResponseT &&jobs_response) + { + std::unique_lock l(*this); + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return false; } + return jobs_set_response(*jobs_item, std::forward(jobs_response)); } - inline void jobs_waitforchildren(const std::vector> &jobs_items) + inline bool jobs_set_state(const JobsID &jobs_id, const small::jobsimpl::EnumJobsState &jobs_state) { - // set the jobs as waitforchildren only if there are children otherwise advance to finish - jobs_set_state(jobs_items, small::jobsimpl::EnumJobsState::kTimeout); + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return false; + } + return jobs_set_state(*jobs_item, jobs_state); } - inline void jobs_timeout(const std::vector &jobs_ids) + inline bool jobs_set_state(const JobsID &jobs_id, const small::jobsimpl::EnumJobsState &jobs_state, const JobsResponseT &jobs_response) { - // set the jobs as timeout if it is not finished until now (called from queue) - jobs_set_state(jobs_ids, small::jobsimpl::EnumJobsState::kTimeout); + auto *jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + return false; + } + jobs_set_response(*jobs_item, jobs_response); + return jobs_set_state(*jobs_item, jobs_state); } - // - // apply state - // - inline void jobs_set_state(const JobsID &jobs_id, const small::jobsimpl::EnumJobsState &jobs_state) + inline bool jobs_set_state(const JobsID &jobs_id, const small::jobsimpl::EnumJobsState &jobs_state, JobsResponseT &&jobs_response) { auto *jobs_item = jobs_get(jobs_id); if (!jobs_item) { - return; + 
return false; } + jobs_set_response(*jobs_item, std::forward(jobs_response)); + return jobs_set_state(*jobs_item, jobs_state); + } - auto ret = jobs_set_state(*jobs_item, jobs_state); - if (ret) { - jobs_completed({*jobs_item}); + inline std::size_t jobs_timeout(const std::vector &jobs_ids) + { + // set the jobs as timeout if it is not finished until now (called from queue) + return jobs_set_state(jobs_get(jobs_ids), small::jobsimpl::EnumJobsState::kTimeout); + } + + // + // jobs set progress + // + inline bool jobs_set_progress(const std::shared_ptr &jobs_item, const int &progress) + { + jobs_item->set_progress(progress); + if (progress == 100) { + jobs_finished(jobs_item->m_id); } + return true; } - inline void jobs_set_state(const std::vector &jobs_ids, const small::jobsimpl::EnumJobsState &jobs_state) + // + // jobs set response + // + inline void jobs_set_response(std::shared_ptr &jobs_item, const JobsResponseT &jobs_response) { - auto jobs_items = jobs_get(jobs_ids); - jobs_set_state(jobs_items, jobs_state); + std::unique_lock l(*this); + jobs_item->m_response = jobs_response; + } + + inline void jobs_set_response(std::shared_ptr &jobs_item, JobsResponseT &&jobs_response) + { + std::unique_lock l(*this); + jobs_item->m_response = std::move(jobs_response); + } + + // + // jobs set states + // + inline std::size_t jobs_waitforchildren(const std::vector> &jobs_items) + { + // set the jobs as waitforchildren only if there are children otherwise advance to finish + return jobs_set_state(jobs_items, small::jobsimpl::EnumJobsState::kTimeout); + } + + // + // apply state + // + inline bool jobs_set_state(const std::shared_ptr &jobs_item, const small::jobsimpl::EnumJobsState &jobs_state) + { + auto ret = jobs_apply_state(jobs_item, jobs_state); + if (ret) { + jobs_completed({jobs_item}); + } + return ret; } - inline void jobs_set_state(const std::vector> &jobs_items, const small::jobsimpl::EnumJobsState &jobs_state) + inline std::size_t jobs_set_state(const std::vector> &jobs_items, const small::jobsimpl::EnumJobsState &jobs_state) { std::vector> changed_items; changed_items.reserve(jobs_items.size()); for (auto &jobs_item : jobs_items) { - auto ret = jobs_set_state(jobs_item, jobs_state); + auto ret = jobs_apply_state(jobs_item, jobs_state); if (ret) { changed_items.push_back(jobs_item); } } jobs_completed(changed_items); + return changed_items.size(); } - inline std::size_t jobs_set_state(std::shared_ptr jobs_item, small::jobsimpl::EnumJobsState jobs_state) + inline bool jobs_apply_state(std::shared_ptr jobs_item, small::jobsimpl::EnumJobsState jobs_state) { // state is already the same if (jobs_item->is_state(jobs_state)) { - return 0; + return false; } // set the jobs as timeout if it is not finished until now if (jobs_state == small::jobsimpl::EnumJobsState::kTimeout && jobs_item->is_state_finished()) { - return 0; + return false; } // set the jobs as waitforchildren only if there are children otherwise advance to finish @@ -555,7 +653,7 @@ namespace small { } jobs_item->set_state(jobs_state); - return jobs_item->is_state(jobs_state) ? 
1 : 0; + return jobs_item->is_state(jobs_state); } // From 253b16e8049ba659021e331741ede9e651abf4ae Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 14 Jan 2025 21:18:04 +0200 Subject: [PATCH 11/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- .clang-format | 1 + examples/examples_jobs_engine.h | 100 +++++++++++++++++--------------- include/impl/jobs_item_impl.h | 1 + include/impl/jobs_queue_impl.h | 53 ++++------------- include/jobs_engine.h | 94 +++++++++++++++++++++++------- 5 files changed, 137 insertions(+), 112 deletions(-) diff --git a/.clang-format b/.clang-format index dbe3254..dccc649 100644 --- a/.clang-format +++ b/.clang-format @@ -12,6 +12,7 @@ AlignConsecutiveAssignments: AlignFunctionDeclarations: true AlignConsecutiveBitFields: true AlignConsecutiveMacros: true +# PointerAlignment: Left BraceWrapping: AfterEnum: true AfterStruct: true diff --git a/examples/examples_jobs_engine.h b/examples/examples_jobs_engine.h index 0f648e2..282902d 100644 --- a/examples/examples_jobs_engine.h +++ b/examples/examples_jobs_engine.h @@ -77,7 +77,7 @@ namespace examples::jobs_engine { .m_types = {{JobsType::kJobsType1, {.m_group = JobsGroupType::kJobsGroup12}}, {JobsType::kJobsType2, {.m_group = JobsGroupType::kJobsGroup12}}, - {JobsType::kJobsType3, {.m_group = JobsGroupType::kJobsGroup3, .m_timeout = std::chrono::milliseconds(500)}}, + {JobsType::kJobsType3, {.m_group = JobsGroupType::kJobsGroup3, .m_timeout = std::chrono::milliseconds(700)}}, {JobsType::kJobsDatabase, {.m_group = JobsGroupType::kJobsGroupDatabase}}, {JobsType::kJobsCache, {.m_group = JobsGroupType::kJobsGroupCache}}}}; @@ -88,53 +88,52 @@ namespace examples::jobs_engine { // create a cache server (with workers to simulate access to it) // (as an external engine outside the jobs engine for demo purposes) - small::worker_thread cache_server({.threads_count = 1}, [&](auto &w /*this*/, const auto &items) { - for (auto &i : items) { - std::cout << "thread " << std::this_thread::get_id() - << " CACHE processing" - << " {" << i << "}" << "\n"; + small::worker_thread> cache_server({.threads_count = 1}, [&](auto &w /*this*/, const auto &items) { + for (auto &[job_id, req] : items) { + std::cout << "worker thread " << std::this_thread::get_id() + << " CACHE processing" + << " {" << req.first << ", " << req.second << ", jobid=" << job_id << "}" + << " time " << small::toISOString(small::timeNow()) + << "\n"; // mark the jobs id associated as succeeded (for demo purposes to avoid creating other structures) - jobs.jobs_finished(i, (Response)i); + jobs.jobs_finished(job_id, (Response)job_id); } // sleep long enough // no coalesce for demo purposes (sleep 500) so 3rd parent items is finished due to database and not cache server small::sleep(500); }); + auto fn_print_item = [](auto item, std::string fn_type) { + std::cout << "thread " << std::this_thread::get_id() + << std::setw(10) << fn_type + << " processing " + << "{" + << " jobid=" << std::setw(2) << item->m_id + << " type=" << std::setw(1) << (int)item->m_type + << " req.int=" << std::setw(2) << item->m_request.first << "," + << " req.str=\"" << item->m_request.second << "\"" + << "}" + << " time " << small::toISOString(small::timeNow()) + << "\n"; + }; + // default processing used for job type 3 with custom delay in between requests // one request will succeed and one request will timeout for demo purposes - jobs.config_default_function_processing([](auto &j /*this jobs 
engine*/, const auto &jobs_items, auto &jobs_config) { + jobs.config_default_function_processing([&](auto &j /*this jobs engine*/, const auto &jobs_items, auto &jobs_config) { for (auto &item : jobs_items) { - std::cout << "thread " << std::this_thread::get_id() - << " DEFAULT processing " - << "{" - << " type=" << (int)item->m_type - << " req.int=" << item->m_request.first << "," - << " req.str=\"" << item->m_request.second << "\"" - << "}" - << " ref count " << item.use_count() - << " time " << small::toISOString(small::timeNow()) - << "\n"; + fn_print_item(item, "DEFAULT"); } // set a custom delay (timeout for job3 is 500 ms) - jobs_config.m_delay_next_request = std::chrono::milliseconds(1000); + jobs_config.m_delay_next_request = std::chrono::milliseconds(500); + small::sleep(500); // TODO remove this after delay works }); // add specific function for job1 (calling the function from jobs intead of config allows to pass the engine and extra param) jobs.config_jobs_function_processing(JobsType::kJobsType1, [&](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */, auto b /*extra param b*/) { for (auto &item : jobs_items) { - std::cout << "thread " << std::this_thread::get_id() - << " JOB1 processing " - << "{" - << " type=" << (int)item->m_type - << " req.int=" << item->m_request.first << "," - << " req.str=\"" << item->m_request.second << "\"" - << "}" - << " ref count " << item.use_count() - << " time " << small::toISOString(small::timeNow()) - << "\n"; + fn_print_item(item, "JOB1"); // add 2 more children jobs for current one for database and server cache JobsEng::JobsID jobs_child_db_id{}; @@ -152,7 +151,7 @@ namespace examples::jobs_engine { j.jobs_start(small::EnumPriorities::kNormal, jobs_child_db_id); // jobs_child_cache_id has no threads to execute, it has external executors - cache_server.push_back(jobs_child_cache_id); + cache_server.push_back({jobs_child_cache_id, item->m_request}); } small::sleep(30); }, 5 /*param b*/); @@ -165,25 +164,16 @@ namespace examples::jobs_engine { // TODO set state merge daca e doar o dependinta, daca sunt mai multe atunci ar tb o functie custom - childProcessing (desi are sau nu are children - sau cum fac un dummy children - poate cu thread_count 0?) 
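    // The TODOs above (in Romanian) ask: if another job_server were used, how could it be modeled
    // so the local job stays in a waiting state as if it had children, the request is made
    // elsewhere, and the state is set when that remote work finishes; a plain set-state works for
    // a single dependency, several would need a custom children function (childProcessing), or a
    // "dummy" child - maybe with thread_count 0.
    // One possible shape, following the same pattern the cache_server demo above already uses
    // (illustration only; kJobsExternal, kJobsGroupExternal and external_server are hypothetical
    // names, and a group accepting .m_threads_count = 0 is an assumption):
    //
    // config side:
    //   .m_groups = {..., {JobsGroupType::kJobsGroupExternal, {.m_threads_count = 0}}},
    //   .m_types  = {..., {JobsType::kJobsExternal, {.m_group = JobsGroupType::kJobsGroupExternal}}},
    //
    // inside a parent's processing function (like JOB1 above), add the remote work as a child so
    // the parent waits on it, but never jobs_start() it locally:
    //   JobsEng::JobsID external_id{};
    //   if (j.queue().push_back_child(item->m_id /*parent*/, JobsType::kJobsExternal, item->m_request, &external_id)) {
    //       external_server.push_back({external_id, item->m_request});
    //   } else {
    //       j.jobs_failed(item->m_id);
    //   }
    //
    // and in the external server's completion path:
    //   jobs.jobs_finished(external_id, response); // or jobs.jobs_failed(external_id)
    // so the parent leaves kWaitChildren once all of its children have completed.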
// add specific function for job2 - jobs.config_jobs_function_processing(JobsType::kJobsType2, [](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { - bool first_job = true; + jobs.config_jobs_function_processing(JobsType::kJobsType2, [&](auto &j /*this jobs engine*/, const auto &jobs_items, auto &jobs_config) { + static bool first_job = true; for (auto &item : jobs_items) { - std::cout << "thread " << std::this_thread::get_id() - << " JOB2 processing " - << "{" - << " type=" << (int)item->m_type - << " req.int=" << item->m_request.first << "," - << " req.str=\"" << item->m_request.second << "\"" - << "}" - << " ref count " << item.use_count() - << " time " << small::toISOString(small::timeNow()) - << "\n"; + fn_print_item(item, "JOB2"); if (first_job) { // for type 2 only database children (for demo purposes no result will be used from database) auto ret = j.queue().push_back_and_start_child(item->m_id /*parent*/, - small::EnumPriorities::kNormal, - JobsType::kJobsDatabase, + small::EnumPriorities::kNormal, + JobsType::kJobsDatabase, item->m_request); if (!ret) { j.jobs_failed(item->m_id); @@ -195,7 +185,21 @@ namespace examples::jobs_engine { first_job = false; } // TODO config to wait after request (even if it is not specified in the global config - so custom throttle) - small::sleep(30); }); + small::sleep(30); + jobs_config.m_delay_next_request = std::chrono::milliseconds(30); + }); + + // add specific function for job2 + jobs.config_jobs_function_processing(JobsType::kJobsDatabase, [&](auto &j /*this jobs engine*/, const auto &jobs_items, auto & /* config */) { + for (auto &item : jobs_items) { + // simulate long db call + small::sleep(200); + + fn_print_item(item, "DATABASE"); + + // this job will be auto-finished + } + }); // TODO add function for database where demonstrate coalesce of 3 items (sleep 1000) // TODO add function for cache server - no coalesce for demo purposes (sleep 500) so 3rd parent items is finished due to database and not cache server @@ -207,8 +211,8 @@ namespace examples::jobs_engine { std::vector jobs_ids; // type3 one request will succeed and one request will timeout for demo purposes - jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType3, {3, "normal3"}, &jobs_id); - jobs.queue().push_back_and_start(small::EnumPriorities::kHigh, JobsType::kJobsType3, {3, "high3"}, &jobs_id); + jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(100), small::EnumPriorities::kNormal, JobsType::kJobsType3, {3, "normal3"}, &jobs_id); + jobs.queue().push_back_and_start_delay_for(std::chrono::milliseconds(100), small::EnumPriorities::kHigh, JobsType::kJobsType3, {3, "high3"}, &jobs_id); // type2 only the first request succeeds and waits for child the other fails from the start jobs.queue().push_back_and_start(small::EnumPriorities::kNormal, JobsType::kJobsType2, {2, "normal2"}, &jobs_id); @@ -242,6 +246,8 @@ namespace examples::jobs_engine { auto ret = jobs.wait_for(std::chrono::milliseconds(100)); // wait to finished std::cout << "wait for with timeout, ret = " << static_cast(ret) << " as timeout\n"; jobs.wait(); // wait here for jobs to finish due to exit flag + cache_server.signal_exit_force(); + cache_server.wait(); std::cout << "size = " << jobs.size() << "\n"; diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 7d83312..6500fb6 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -101,6 +101,7 @@ namespace small::jobsimpl { inline void 
set_state_cancelled () { set_state(EnumJobsState::kCancelled); } inline bool is_state (const EnumJobsState &state) { return m_state.load() == state; } + static bool is_state_complete (const EnumJobsState &state) { return state >= EnumJobsState::kFinished; } inline bool is_state_inprogress () { return is_state(EnumJobsState::kInProgress); } inline void is_state_waitchildren () { return is_state(EnumJobsState::kWaitChildren); } diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index ec5ca74..ca7b70f 100644 --- a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -9,7 +9,7 @@ namespace small::jobsimpl { // - // small queue helper class for jobs (parent caller must implement 'jobs_schedule', 'jobs_finished') + // small queue helper class for jobs (parent caller must implement 'jobs_add', 'jobs_schedule', 'jobs_finished') // template class jobs_queue @@ -94,7 +94,7 @@ namespace small::jobsimpl { // config job types // m_types_queues will be initialized in the initial setup phase and will be accessed without locking afterwards // - inline bool config_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group, const std::optional &jobs_timeout) + inline bool config_jobs_type(const JobsTypeT &jobs_type, const JobsGroupT &jobs_group) { auto it_g = m_groups_queues.find(jobs_group); if (it_g == m_groups_queues.end()) { @@ -102,9 +102,6 @@ namespace small::jobsimpl { } m_types_queues[jobs_type] = &it_g->second; - if (jobs_timeout) { - m_types_timeouts[jobs_type] = *jobs_timeout; - } return true; } @@ -562,8 +559,9 @@ namespace small::jobsimpl { jobs_item->m_id = id; m_jobs.emplace(id, jobs_item); - // add it to the timeout queue - jobs_start_timeout(jobs_item); + // call parent for extra processing + m_parent_caller.jobs_add(jobs_item); + return id; } @@ -610,21 +608,6 @@ namespace small::jobsimpl { return ret; } - // - // add it to the timeout queue - // - inline std::size_t jobs_start_timeout(std::shared_ptr jobs_item) - { - std::unique_lock l(m_lock); - - // only if job type has config a timeout - auto it_timeout = m_types_timeouts.find(jobs_item->m_type); - if (it_timeout == m_types_timeouts.end()) { - return 0; - } - return m_timeout_queue.queue().push_delay_for(it_timeout->second, jobs_item->m_id); - } - // // erase jobs item // @@ -685,34 +668,18 @@ namespace small::jobsimpl { return count; } - // - // inner thread function for timeout items - // called from m_timeout_queue - // - using JobsQueueTimeout = small::time_queue_thread; - friend JobsQueueTimeout; - - inline std::size_t push_back(std::vector &&jobs_ids) - { - m_parent_caller.jobs_timeout(jobs_ids); - return jobs_ids.size(); - } - private: // // members // - mutable small::base_lock m_lock; // global locker - std::atomic m_jobs_seq_id{}; // to get the next jobs id - std::unordered_map> m_jobs; // current jobs - std::unordered_map m_groups_queues; // map of queues by group - std::unordered_map m_types_queues; // optimize to have queues by type (which reference queues by group) - std::unordered_map m_types_timeouts; // timeouts for types + mutable small::base_lock m_lock; // global locker + std::atomic m_jobs_seq_id{}; // to get the next jobs id + std::unordered_map> m_jobs; // current jobs + std::unordered_map m_groups_queues; // map of queues by group + std::unordered_map m_types_queues; // optimize to have queues by type (which reference queues by group) JobQueueDelayedT m_delayed_items{*this}; // queue of delayed items - JobsQueueTimeout m_timeout_queue{*this}; // for timeout 
elements - ParentCallerT &m_parent_caller; // jobs engine }; } // namespace small::jobsimpl diff --git a/include/jobs_engine.h b/include/jobs_engine.h index f24fae4..42a4e0d 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -143,6 +143,7 @@ namespace small { { m_config.m_engine.m_threads_count = threads_count; m_queue.start_threads(threads_count); + m_timeout_queue.start_threads(); m_thread_pool.start_threads(threads_count); } @@ -320,7 +321,7 @@ namespace small { // // signal exit // - inline void signal_exit_force () { m_thread_pool.signal_exit_force(); m_queue.signal_exit_force(); } + inline void signal_exit_force () { m_thread_pool.signal_exit_force(); m_timeout_queue.queue().signal_exit_force(); m_queue.signal_exit_force(); } inline void signal_exit_when_done () { m_queue.signal_exit_when_done(); /*when the delayed will be finished will signal the active queue items to exit when done, then the processing pool */ } // to be used in processing function @@ -338,7 +339,13 @@ namespace small { m_queue.wait(); // only now can signal exit when done for workers (when no more items exists) - return m_thread_pool.wait(); + m_thread_pool.wait(); + + // the timeouts are no longer necessay, force exit + m_timeout_queue.queue().signal_exit_force(); + m_timeout_queue.wait(); + + return small::EnumLock::kExit; } // wait some time then signal exit @@ -366,7 +373,16 @@ namespace small { } // only now can signal exit when done for workers (when no more items exists) - return m_thread_pool.wait_until(__atime); + delayed_status = m_thread_pool.wait_until(__atime); + if (delayed_status == small::EnumLock::kTimeout) { + return small::EnumLock::kTimeout; + } + + // the timeouts are no longer necessay, force exit + m_timeout_queue.queue().signal_exit_force(); + m_timeout_queue.wait(); + + return small::EnumLock::kExit; } private: @@ -398,7 +414,7 @@ namespace small { m_config.apply_default_function_finished(); for (auto &[jobs_type, jobs_type_config] : m_config.m_types) { - m_queue.config_jobs_type(jobs_type, jobs_type_config.m_group, jobs_type_config.m_timeout); + m_queue.config_jobs_type(jobs_type, jobs_type_config.m_group); } // auto start threads if count > 0 otherwise threads should be manually started @@ -492,15 +508,40 @@ namespace small { } // - // inner function for activate the jobs from queue (called from queue) + // inner function for extra processing after addding the jobs into queue (called from queue) // friend JobsQueue; + inline void jobs_add(std::shared_ptr jobs_item) + { + // add it to the timeout queue + // only if job type has config a timeout + auto timeout = m_config.m_types[jobs_item->m_type].m_timeout; + if (timeout) { + m_timeout_queue.queue().push_delay_for(*timeout, jobs_item->m_id); + } + } + + // + // inner function for activate the jobs from queue (called from queue) + // inline void jobs_schedule(const JobsTypeT &jobs_type, const JobsID & /* jobs_id */) { m_thread_pool.jobs_schedule(m_config.m_types[jobs_type].m_group); } + // + // inner thread function for timeout items (called from m_timeout_queue) + // + using JobsQueueTimeout = small::time_queue_thread; + friend JobsQueueTimeout; + + inline std::size_t push_back(std::vector &&jobs_ids) + { + jobs_timeout(jobs_ids); + return jobs_ids.size(); + } + // // jobs area transform from id to jobs_item // @@ -601,7 +642,7 @@ namespace small { inline std::size_t jobs_waitforchildren(const std::vector> &jobs_items) { // set the jobs as waitforchildren only if there are children otherwise advance to finish - return 
jobs_set_state(jobs_items, small::jobsimpl::EnumJobsState::kTimeout); + return jobs_set_state(jobs_items, small::jobsimpl::EnumJobsState::kWaitChildren); } // @@ -609,8 +650,10 @@ namespace small { // inline bool jobs_set_state(const std::shared_ptr &jobs_item, const small::jobsimpl::EnumJobsState &jobs_state) { - auto ret = jobs_apply_state(jobs_item, jobs_state); - if (ret) { + small::jobsimpl::EnumJobsState set_state = jobs_state; + + auto ret = jobs_apply_state(jobs_item, jobs_state, &set_state); + if (ret && JobsItem::is_state_complete(set_state)) { jobs_completed({jobs_item}); } return ret; @@ -618,42 +661,48 @@ namespace small { inline std::size_t jobs_set_state(const std::vector> &jobs_items, const small::jobsimpl::EnumJobsState &jobs_state) { - std::vector> changed_items; - changed_items.reserve(jobs_items.size()); + small::jobsimpl::EnumJobsState set_state = jobs_state; + std::vector> completed_items; + completed_items.reserve(jobs_items.size()); + std::size_t changed_count{}; for (auto &jobs_item : jobs_items) { - auto ret = jobs_apply_state(jobs_item, jobs_state); + auto ret = jobs_apply_state(jobs_item, jobs_state, &set_state); if (ret) { - changed_items.push_back(jobs_item); + ++changed_count; + if (JobsItem::is_state_complete(set_state)) { + completed_items.push_back(jobs_item); + } } } - jobs_completed(changed_items); - return changed_items.size(); + jobs_completed(completed_items); + return changed_count; } - inline bool jobs_apply_state(std::shared_ptr jobs_item, small::jobsimpl::EnumJobsState jobs_state) + inline bool jobs_apply_state(std::shared_ptr jobs_item, const small::jobsimpl::EnumJobsState &jobs_state, small::jobsimpl::EnumJobsState *jobs_set_state) { + *jobs_set_state = jobs_state; // state is already the same - if (jobs_item->is_state(jobs_state)) { + if (jobs_item->is_state(*jobs_set_state)) { return false; } // set the jobs as timeout if it is not finished until now - if (jobs_state == small::jobsimpl::EnumJobsState::kTimeout && jobs_item->is_state_finished()) { + if (*jobs_set_state == small::jobsimpl::EnumJobsState::kTimeout && jobs_item->is_state_finished()) { return false; } // set the jobs as waitforchildren only if there are children otherwise advance to finish - if (jobs_state == small::jobsimpl::EnumJobsState::kWaitChildren) { + if (*jobs_set_state == small::jobsimpl::EnumJobsState::kWaitChildren) { std::unique_lock l(*this); if (jobs_item->m_childrenIDs.size() == 0) { - jobs_state = small::jobsimpl::EnumJobsState::kFinished; + *jobs_set_state = small::jobsimpl::EnumJobsState::kFinished; } } - jobs_item->set_state(jobs_state); - return jobs_item->is_state(jobs_state); + jobs_item->set_state(*jobs_set_state); + return jobs_item->is_state(*jobs_set_state); } // @@ -688,6 +737,7 @@ namespace small { // JobsConfig m_config; JobsQueue m_queue{*this}; - small::jobsimpl::jobs_engine_thread_pool m_thread_pool{*this}; // for processing items (by group) using a pool of threads + JobsQueueTimeout m_timeout_queue{*this}; // for timeout elements + small::jobsimpl::jobs_engine_thread_pool m_thread_pool{*this}; // for processing items (by group) using a pool of threads }; } // namespace small From 65a2ceef12a1246359fdb91adc9c6c86e6bc8586 Mon Sep 17 00:00:00 2001 From: Cristian Herghelegiu Date: Tue, 14 Jan 2025 23:15:50 +0200 Subject: [PATCH 12/12] Add multiple features for jobs engine like parent child dependencies, throttling with sleep between requests, timeout for processing --- include/impl/jobs_item_impl.h | 84 +++++++++++++++++++-------- 
include/impl/jobs_queue_impl.h | 23 ++++++-- include/jobs_config.h | 46 +++++++-------- include/jobs_engine.h | 101 +++++++++++++++++++++++++++------ 4 files changed, 186 insertions(+), 68 deletions(-) diff --git a/include/impl/jobs_item_impl.h b/include/impl/jobs_item_impl.h index 6500fb6..007f37c 100644 --- a/include/impl/jobs_item_impl.h +++ b/include/impl/jobs_item_impl.h @@ -32,6 +32,8 @@ namespace small::jobsimpl { JobsTypeT m_type{}; // job type std::atomic m_state{EnumJobsState::kNone}; // job state std::atomic m_progress{}; // progress 0-100 for state kInProgress + std::atomic_bool m_has_parents{}; // for dependencies relationships parent-child + std::atomic_bool m_has_children{}; // for dependencies relationships parent-child std::vector m_parentIDs{}; // for dependencies relationships parent-child std::vector m_childrenIDs{}; // for dependencies relationships parent-child JobsRequestT m_request{}; // request needed for processing function @@ -53,26 +55,30 @@ namespace small::jobsimpl { jobs_item(jobs_item &&other) noexcept { operator=(other); }; jobs_item &operator=(const jobs_item &other) { - m_id = other.m_id; - m_type = other.m_type; - m_state = other.m_state.load(); - m_progress = other.m_progress.load(); - m_parentIDs = other.m_parentIDs; - m_childrenIDs = other.m_childrenIDs; - m_request = other.m_request; - m_response = other.m_response; + m_id = other.m_id; + m_type = other.m_type; + m_state = other.m_state.load(); + m_progress = other.m_progress.load(); + m_has_parents = other.m_has_parents.load(); + m_has_children = other.m_has_children.load(); + m_parentIDs = other.m_parentIDs; + m_childrenIDs = other.m_childrenIDs; + m_request = other.m_request; + m_response = other.m_response; return *this; } jobs_item &operator=(jobs_item &&other) noexcept { - m_id = std::move(other.m_id); - m_type = std::move(other.m_type); - m_state = other.m_state.load(); - m_progress = other.m_progress.load(); - m_parentIDs = std::move(other.m_parentIDs); - m_childrenIDs = std::move(other.m_childrenIDs); - m_request = std::move(other.m_request); - m_response = std::move(other.m_response); + m_id = std::move(other.m_id); + m_type = std::move(other.m_type); + m_state = other.m_state.load(); + m_progress = other.m_progress.load(); + m_has_parents = other.m_has_parents.load(); + m_has_children = other.m_has_children.load(); + m_parentIDs = std::move(other.m_parentIDs); + m_childrenIDs = std::move(other.m_childrenIDs); + m_request = std::move(other.m_request); + m_response = std::move(other.m_response); return *this; } @@ -100,21 +106,23 @@ namespace small::jobsimpl { inline void set_state_failed () { set_state(EnumJobsState::kFailed); } inline void set_state_cancelled () { set_state(EnumJobsState::kCancelled); } - inline bool is_state (const EnumJobsState &state) { return m_state.load() == state; } static bool is_state_complete (const EnumJobsState &state) { return state >= EnumJobsState::kFinished; } - inline bool is_state_inprogress () { return is_state(EnumJobsState::kInProgress); } - inline void is_state_waitchildren () { return is_state(EnumJobsState::kWaitChildren); } - inline bool is_state_finished () { return is_state(EnumJobsState::kFinished); } - inline bool is_state_timeout () { return is_state(EnumJobsState::kTimeout); } - inline void is_state_failed () { return is_state(EnumJobsState::kFailed); } - inline void is_state_cancelled () { return is_state(EnumJobsState::kCancelled); } + inline EnumJobsState get_state () const { return m_state.load(); } + inline bool is_state (const 
EnumJobsState &state) const { return get_state() == state; } + inline bool is_complete () const { return is_state_complete(get_state()); } + + inline bool is_state_inprogress () const { return is_state(EnumJobsState::kInProgress); } + inline void is_state_waitchildren () const { return is_state(EnumJobsState::kWaitChildren); } + inline bool is_state_finished () const { return is_state(EnumJobsState::kFinished); } + inline bool is_state_timeout () const { return is_state(EnumJobsState::kTimeout); } + inline void is_state_failed () const { return is_state(EnumJobsState::kFailed); } + inline void is_state_cancelled () const { return is_state(EnumJobsState::kCancelled); } // clang-format on // // set job progress (can only increase) // - inline void set_progress(const int &new_progress) { for (;;) { @@ -127,6 +135,34 @@ namespace small::jobsimpl { } } } + + // + // add child + // + inline void add_child(const JobsID &child_jobs_id) + { + m_childrenIDs.push_back(child_jobs_id); // this should be set under locked area + m_has_children = true; + } + + inline bool has_children() const + { + return m_has_children.load(); + } + + // + // add parent + // + inline void add_parent(const JobsID &parent_jobs_id) + { + m_parentIDs.push_back(parent_jobs_id); // this should be set under locked area + m_has_parents = true; + } + + inline bool has_parents() const + { + return m_has_parents.load(); + } }; } // namespace small::jobsimpl diff --git a/include/impl/jobs_queue_impl.h b/include/impl/jobs_queue_impl.h index ca7b70f..f6069f6 100644 --- a/include/impl/jobs_queue_impl.h +++ b/include/impl/jobs_queue_impl.h @@ -602,7 +602,7 @@ namespace small::jobsimpl { if (ret) { m_parent_caller.jobs_schedule(jobs_type, jobs_id); } else { - // TODO maybe call m_parent.jobs_failed(jobs_id)? + // TODO call m_parent.jobs_failed(jobs_id)? 
// jobs_start should not be under lock then jobs_erase(jobs_id); } return ret; @@ -614,7 +614,23 @@ namespace small::jobsimpl { inline void jobs_erase(const JobsID &jobs_id) { std::unique_lock l(m_lock); + + auto jobs_item = jobs_get(jobs_id); + if (!jobs_item) { + // already deleted + return; + } + // if not a final state, set it to cancelled (in case it is executing at this point) + if (!JobsItem::is_state_complete((*jobs_item)->get_state())) { + (*jobs_item)->set_state_cancelled(); + } + m_jobs.erase(jobs_id); + + // delete all children + for (auto &child_jobs_id : (*jobs_item)->m_childrenIDs) { + jobs_erase(child_jobs_id); + } } // @@ -640,9 +656,8 @@ namespace small::jobsimpl { inline void jobs_parent_child(std::shared_ptr parent_jobs_item, std::shared_ptr child_jobs_item) { std::unique_lock l(m_lock); - - parent_jobs_item->m_childrenIDs.push_back(child_jobs_item->m_id); - child_jobs_item->m_parentIDs.push_back(parent_jobs_item->m_id); + parent_jobs_item->add_child(child_jobs_item->m_id); + child_jobs_item->add_parent(parent_jobs_item->m_id); } private: diff --git a/include/jobs_config.h b/include/jobs_config.h index 45cfb64..1b3dffb 100644 --- a/include/jobs_config.h +++ b/include/jobs_config.h @@ -48,22 +48,22 @@ namespace small { // config for an individual job type struct ConfigJobsType { - JobsGroupT m_group{}; // job type group (multiple job types can be configured to same group) - std::optional m_timeout{}; // if need to delay the next request processing to have some throtelling - bool m_has_function_processing{false}; // use default processing function - bool m_has_function_on_children_finished{false}; // use default function for children finished - bool m_has_function_finished{false}; // use default finished function - FunctionProcessing m_function_processing{}; // processing Function for jobs items - FunctionOnChildrenFinished m_function_on_children_finished{}; // function called for a parent when a child is finished - FunctionFinished m_function_finished{}; // function called when jobs items are finished + JobsGroupT m_group{}; // job type group (multiple job types can be configured to same group) + std::optional m_timeout{}; // if need to delay the next request processing to have some throtelling + bool m_has_function_processing{false}; // use default processing function + bool m_has_function_children_finished{false}; // use default function for children finished + bool m_has_function_finished{false}; // use default finished function + FunctionProcessing m_function_processing{}; // processing Function for jobs items + FunctionOnChildrenFinished m_function_children_finished{}; // function called for a parent when a child is finished + FunctionFinished m_function_finished{}; // function called when jobs items are finished }; - ConfigJobsEngine m_engine{}; // config for entire engine (threads, priorities, etc) - FunctionProcessing m_default_function_processing{}; // default processing function - FunctionOnChildrenFinished m_default_function_on_children_finished{}; // default function to call for a parent when children are finished - FunctionFinished m_default_function_finished{}; // default function to call when jobs items are finished - std::unordered_map m_groups; // config by jobs group - std::unordered_map m_types; // config by jobs type + ConfigJobsEngine m_engine{}; // config for entire engine (threads, priorities, etc) + FunctionProcessing m_default_function_processing{}; // default processing function + FunctionOnChildrenFinished 
m_default_function_children_finished{}; // default function to call for a parent when children are finished + FunctionFinished m_default_function_finished{}; // default function to call when jobs items are finished + std::unordered_map m_groups; // config by jobs group + std::unordered_map m_types; // config by jobs type // // add default processing function @@ -74,10 +74,10 @@ namespace small { apply_default_function_processing(); } - inline void config_default_function_on_children_finished(FunctionOnChildrenFinished function_on_children_finished) + inline void config_default_function_children_finished(FunctionOnChildrenFinished function_children_finished) { - m_default_function_on_children_finished = function_on_children_finished; - apply_default_function_on_children_finished(); + m_default_function_children_finished = function_children_finished; + apply_default_function_children_finished(); } inline void config_default_function_finished(FunctionFinished function_finished) @@ -99,14 +99,14 @@ namespace small { it_f->second.m_function_processing = function_processing; } - inline void config_jobs_function_on_children_finished(const JobsTypeT &jobs_type, FunctionOnChildrenFinished function_on_children_finished) + inline void config_jobs_function_children_finished(const JobsTypeT &jobs_type, FunctionOnChildrenFinished function_children_finished) { auto it_f = m_types.find(jobs_type); if (it_f == m_types.end()) { return; } - it_f->second.m_has_function_on_children_finished = true; - it_f->second.m_function_on_children_finished = function_on_children_finished; + it_f->second.m_has_function_children_finished = true; + it_f->second.m_function_children_finished = function_children_finished; } inline void config_jobs_function_finished(const JobsTypeT &jobs_type, FunctionFinished function_finished) @@ -131,11 +131,11 @@ namespace small { } } - inline void apply_default_function_on_children_finished() + inline void apply_default_function_children_finished() { for (auto &[type, jobs_type_config] : m_types) { - if (jobs_type_config.m_has_function_on_children_finished == false) { - jobs_type_config.m_function_on_children_finished = m_default_function_on_children_finished; + if (jobs_type_config.m_has_function_children_finished == false) { + jobs_type_config.m_function_children_finished = m_default_function_children_finished; } } } diff --git a/include/jobs_engine.h b/include/jobs_engine.h index 42a4e0d..fc711c5 100644 --- a/include/jobs_engine.h +++ b/include/jobs_engine.h @@ -171,9 +171,9 @@ namespace small { } template - inline void config_default_function_on_children_finished(_Callable function_on_children_finished, Args... extra_parameters) + inline void config_default_function_children_finished(_Callable function_children_finished, Args... extra_parameters) { - m_config.config_default_function_on_children_finished(std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); + m_config.config_default_function_children_finished(std::bind(std::forward<_Callable>(function_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::placeholders::_2 /*config*/, std::forward(extra_parameters)...)); } template @@ -190,9 +190,9 @@ namespace small { } template - inline void config_jobs_function_on_children_finished(const JobsTypeT &jobs_type, _Callable function_on_children_finished, Args... 
extra_parameters) + inline void config_jobs_function_children_finished(const JobsTypeT &jobs_type, _Callable function_children_finished, Args... extra_parameters) { - m_config.config_jobs_function_on_children_finished(jobs_type, std::bind(std::forward<_Callable>(function_on_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); + m_config.config_jobs_function_children_finished(jobs_type, std::bind(std::forward<_Callable>(function_children_finished), std::ref(*this), std::placeholders::_1 /*jobs_items*/, std::forward(extra_parameters)...)); } template @@ -338,6 +338,8 @@ namespace small { // first wait for queue items to finish m_queue.wait(); + // TODO wait for not finished items to be finished (some are finished by external) + // only now can signal exit when done for workers (when no more items exists) m_thread_pool.wait(); @@ -372,6 +374,8 @@ namespace small { return small::EnumLock::kTimeout; } + // TODO wait for not finished items to be finished (some are finished by external) + // only now can signal exit when done for workers (when no more items exists) delayed_status = m_thread_pool.wait_until(__atime); if (delayed_status == small::EnumLock::kTimeout) { @@ -405,12 +409,15 @@ namespace small { } // setup jobs types - if (!m_config.m_default_function_on_children_finished) { - m_config.m_default_function_on_children_finished = std::bind(&jobs_engine::jobs_on_children_finished, this, std::placeholders::_1 /*jobs_items*/); + if (!m_config.m_default_function_finished) { + m_config.m_default_function_finished = std::bind(&jobs_engine::jobs_on_finished, this, std::placeholders::_1 /*jobs_items*/); + } + if (!m_config.m_default_function_children_finished) { + m_config.m_default_function_children_finished = std::bind(&jobs_engine::jobs_on_children_finished, this, std::placeholders::_1 /*jobs_items*/); } m_config.apply_default_function_processing(); - m_config.apply_default_function_on_children_finished(); + m_config.apply_default_function_children_finished(); m_config.apply_default_function_finished(); for (auto &[jobs_type, jobs_type_config] : m_config.m_types) { @@ -616,7 +623,7 @@ namespace small { { jobs_item->set_progress(progress); if (progress == 100) { - jobs_finished(jobs_item->m_id); + jobs_set_state(jobs_item, small::jobsimpl::EnumJobsState::kFinished); } return true; } @@ -695,8 +702,7 @@ namespace small { // set the jobs as waitforchildren only if there are children otherwise advance to finish if (*jobs_set_state == small::jobsimpl::EnumJobsState::kWaitChildren) { - std::unique_lock l(*this); - if (jobs_item->m_childrenIDs.size() == 0) { + if (!jobs_item->has_children()) { *jobs_set_state = small::jobsimpl::EnumJobsState::kFinished; } } @@ -710,15 +716,29 @@ namespace small { // inline void jobs_completed(const std::vector> &jobs_items) { - // TODO call the custom function from config if exists // (this may be called from multiple places - queue timeout, do_action finished, above set state cancel, finish, ) - // TODO delete only if there are no parents (delete all the finished children now) + // call the custom function from config (if exists, otherwise the default will be called) for (auto &jobs_item : jobs_items) { - m_queue.jobs_erase(jobs_item->m_id); + m_config.m_types[jobs_item->m_type].m_function_finished({jobs_item}); + + // if it has parents call jobs_on_children_finished (or custom function) + if (jobs_item->has_parents()) { + jobs_set_progress(jobs_item, 100); // TODO update parents too + 
m_config.m_types[jobs_item->m_type].m_function_children_finished({jobs_item}); + } else { + // delete only if there are no parents (+delete all children) + m_queue.jobs_erase(jobs_item->m_id); + } } + } - // TODO if it has parents call jobs_on_children_finished + // + // when is finished + // + inline void jobs_on_finished(const std::vector> &/* jobs_items */) + { + // by default nothing to here, but it can be setup for each jobs type } // @@ -726,9 +746,56 @@ namespace small { // inline void jobs_on_children_finished(const std::vector> &jobs_children) { - // TODO update parent state and progress - // for (auto &jobs_child : jobs_children) { - // } + for (auto &jobs_item : jobs_children) { + // + // compute parent state and progress based on children + // if a children has failed/timeout/cancelled then parent is set to failed + // if all children are finished then the parent is finished + // + std::unordered_map, std::vector>>> unfinished_parents; + { + std::unique_lock l(*this); + + auto parent_jobs_items = jobs_get(jobs_item->m_parentIDs); + for (auto &parent_jobs_item : parent_jobs_items) { + if (parent_jobs_item->is_complete()) { + continue; + } + // add to the unfinished parents map (with all children) + unfinished_parents[parent_jobs_item->m_id] = {parent_jobs_item, jobs_get(parent_jobs_item->m_childrenIDs)}; + } + } // lock finished + + for (auto &[parent_jobs_id, parent_info] : unfinished_parents) { + // compute progress from all finished children + std::size_t count_failed_children = 0; + std::size_t count_completed_children = 0; + std::size_t count_total_children = parent_info.second.size(); + for (auto &child_jobs_item : parent_info.second) { + if (!child_jobs_item->is_complete()) { + continue; + } + + ++count_completed_children; + if (!child_jobs_item->is_state_finished()) { + ++count_failed_children; + } + } + + if (count_failed_children) { + jobs_failed(parent_jobs_id); + } else { + + std::size_t progress = count_total_children ? (count_completed_children * 100 / count_total_children) : 100; + jobs_progress(parent_jobs_id, progress); // TODO this should be recursive child->parent->parent (taking into account state) + + // set finished state + if (count_total_children == count_completed_children) { + jobs_finished(parent_jobs_id); + } + } + } + } } private:
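    // At the example level, the default aggregation above (parent fails if any child fails,
    // finishes only when every child has completed) can be replaced per type. A rough sketch of
    // an "any child finishes the parent" policy (illustration only): it is registered on the
    // child's type, because jobs_completed() dispatches m_function_children_finished by the
    // completed item's own type, and the callback shape is assumed to match the other
    // config_* hooks (engine reference plus the jobs items); locking is ignored for brevity.
    //
    //   jobs.config_jobs_function_children_finished(JobsType::kJobsDatabase, [](auto &j /*engine*/, const auto &jobs_children) {
    //       for (auto &child : jobs_children) {
    //           for (auto &parent_id : child->m_parentIDs) { // parent links created by push_back_child
    //               j.jobs_finished(parent_id);              // don't wait for the remaining children
    //           }
    //       }
    //   });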