Skip to content

Commit

Permalink
work in progress
Browse files Browse the repository at this point in the history
  • Loading branch information
Simon Klix committed Jan 10, 2025
1 parent a30b810 commit af446e6
Show file tree
Hide file tree
Showing 10 changed files with 361 additions and 17 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ namespace hal
virtual Result<std::vector<FEATURE_TYPE>> calculate_feature(Context& ctx, const Gate* g) const = 0;
virtual std::string to_string() const = 0;

Result<std::vector<std::vector<FEATURE_TYPE>>> calculate_feature(Context& ctx, const std::vector<Gate*>& gates) const override;
virtual Result<std::vector<std::vector<FEATURE_TYPE>>> calculate_feature(Context& ctx, const std::vector<Gate*>& gates) const override;
};

class GateFeatureBulk : public GateFeature
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ namespace hal
{
public:
virtual Result<std::vector<FEATURE_TYPE>> calculate_feature(Context& ctx, const Gate* g_a, const Gate* g_b) const = 0;
virtual std::string to_string() const = 0;
virtual Result<std::vector<std::vector<FEATURE_TYPE>>> calculate_features(Context& ctx, const std::vector<std::pair<Gate*, Gate*>>& gate_pairs) const;
virtual std::string to_string() const = 0;
};

class LogicalDistance : public GatePairFeature
Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""Benchmark the cached (bulk) vs. single-gate variants of
NetlistAbstractionDecorator.get_next_matching_gates and verify that
both produce identical results."""
import sys, os
import pathlib

user_name = os.getlogin()

# Per-user HAL checkout locations; extend this mapping before running on a new machine.
BASE_PATHS = {
    "simon": "/home/simon/projects/hal/",
    "simon.klix": "/mnt/scratch/simon.klix/tools/hal/",
}

# BUGFIX: the original used two independent `if` statements, so for user
# "simon" the second check failed and the `else` branch exited the script
# even though a base path had been configured.
if user_name in BASE_PATHS:
    base_path = BASE_PATHS[user_name]
else:
    print("add base paths for user {} before executing...".format(user_name))
    sys.exit(1)

sys.path.append(base_path + "build/lib/")  # this is where your hal python lib is located
os.environ["HAL_BASE_PATH"] = base_path + "build"  # hal base path

import hal_py
import random
import time

# initialize HAL
hal_py.plugin_manager.load_all_plugins()

from hal_plugins import machine_learning

netlist_path = "/home/nfs0/simon.klix/projects/benchmarks/netlists_preprocessed/yosys/NangateOpenCellLibrary/synthetic/arithmetic/synth_0/netlist_1559bb70c68caf4c_7403e6d54ad10560/netlist_1559bb70c68caf4c_7403e6d54ad10560.hal"
gate_lib_path = base_path + "/plugins/gate_libraries/definitions/NangateOpenCellLibrary.hgl"

netlist = hal_py.NetlistFactory.load_netlist(netlist_path, gate_lib_path)

# Build a sequential-only abstraction over all flip-flop gates.
seq_gates = netlist.get_gates(lambda g: g.type.has_property(hal_py.ff))
nl_seq_abstr = hal_py.NetlistAbstraction.create(netlist, seq_gates, False)

# Sanity check: bail out if the abstraction yields no successors at all.
res = nl_seq_abstr.get_successors(seq_gates[0])
if not res:
    sys.exit(1)

nl_seq_abstr_dec = hal_py.NetlistAbstractionDecorator(nl_seq_abstr)

# Pick 10 random starting gates (duplicates allowed, matching the original sampling).
to_test = random.choices(seq_gates, k=10)

# Limit traversal depth to 2 on both entry and exit endpoints.
ep_filter = lambda _ep, d: d <= 2

# TEST cached: one bulk call for all starting gates
start_cached = time.time()
cached_results = nl_seq_abstr_dec.get_next_matching_gates(to_test, lambda g: True, hal_py.PinDirection.output, True, True, ep_filter, ep_filter)
end_cached = time.time()

# TEST single: one call per starting gate
original_results = list()
start_original = time.time()
for g in to_test:
    res = nl_seq_abstr_dec.get_next_matching_gates(g, lambda g: True, hal_py.PinDirection.output, True, True, ep_filter, ep_filter)
    original_results.append(res)
end_original = time.time()

# Results must match; also report both runtimes for comparison.
print(cached_results == original_results)
print(end_cached - start_cached)
print(end_original - start_original)

hal_py.plugin_manager.unload_all_plugins()
12 changes: 8 additions & 4 deletions plugins/machine_learning/src/features/gate_feature_single.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -270,7 +270,10 @@ namespace hal
{
// Preallocate the feature vectors
std::vector<std::vector<FEATURE_TYPE>> feature_vecs(gates.size());
std::vector<Result<std::monostate>> thread_results(ctx.num_threads, ERR("uninitialized"));

const u32 used_threads = std::min(u32(gates.size()), ctx.num_threads);

std::vector<Result<std::monostate>> thread_results(used_threads, ERR("uninitialized"));

// Worker function for each thread
auto thread_func = [&](u32 start, u32 end, u32 thread_index) {
Expand All @@ -292,8 +295,8 @@ namespace hal

// Launch threads to process gates in parallel
std::vector<std::thread> threads;
u32 chunk_size = (gates.size() + ctx.num_threads - 1) / ctx.num_threads;
for (u32 t = 0; t < ctx.num_threads; ++t)
u32 chunk_size = (gates.size() + used_threads - 1) / used_threads;
for (u32 t = 0; t < used_threads; ++t)
{
u32 start = t * chunk_size;
u32 end = std::min(start + chunk_size, u32(gates.size()));
Expand All @@ -309,8 +312,9 @@ namespace hal
}

// Check whether a thread encountered an error
for (const auto& res : thread_results)
for (u32 idx = 0; idx < threads.size(); idx++)
{
const auto& res = thread_results.at(idx);
if (res.is_error())
{
return ERR_APPEND(res.get_error(), "Encountered error when building feature vectors");
Expand Down
29 changes: 25 additions & 4 deletions plugins/machine_learning/src/features/gate_pair_feature.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,23 @@ namespace hal
{
namespace gate_pair_feature
{
/**
 * Default bulk implementation: computes the feature vector for every gate
 * pair by delegating to the single-pair calculate_feature() overload.
 * Derived classes may override this with a more efficient batched version.
 *
 * @param ctx - The feature computation context.
 * @param gate_pairs - The gate pairs to compute features for.
 * @returns One feature vector per input pair (in input order) on success,
 *          an error if any single pair fails.
 */
Result<std::vector<std::vector<FEATURE_TYPE>>> GatePairFeature::calculate_features(Context& ctx, const std::vector<std::pair<Gate*, Gate*>>& gate_pairs) const
{
    std::vector<std::vector<FEATURE_TYPE>> results;
    // The output size is known up front; reserve to avoid repeated reallocation.
    results.reserve(gate_pairs.size());

    for (const auto& [g_a, g_b] : gate_pairs)
    {
        auto res = calculate_feature(ctx, g_a, g_b);
        if (res.is_error())
        {
            return ERR_APPEND(res.get_error(), "cannot calculate features for all pairs");
        }

        results.push_back(res.get());
    }

    return OK(results);
}

Result<std::vector<FEATURE_TYPE>> LogicalDistance::calculate_feature(Context& ctx, const Gate* g_a, const Gate* g_b) const
{
if (g_a == g_b)
Expand Down Expand Up @@ -550,7 +567,10 @@ namespace hal
{
// Preallocate the feature vectors
std::vector<std::vector<FEATURE_TYPE>> feature_vecs(gate_pairs.size());
std::vector<Result<std::monostate>> thread_results(ctx.num_threads, ERR("uninitialized"));

const u32 used_threads = std::min(u32(gate_pairs.size()), ctx.num_threads);

std::vector<Result<std::monostate>> thread_results(used_threads, ERR("uninitialized"));

#ifdef PROGRESS_BAR
const auto msg = "Calculated gate pair features for " + std::to_string(gate_pairs.size()) + "/" + std::to_string(gate_pairs.size()) + " pairs";
Expand Down Expand Up @@ -591,8 +611,8 @@ namespace hal

// Launch threads to process gate_pairs in parallel
std::vector<std::thread> threads;
u32 chunk_size = (gate_pairs.size() + ctx.num_threads - 1) / ctx.num_threads;
for (u32 t = 0; t < ctx.num_threads; ++t)
u32 chunk_size = (gate_pairs.size() + used_threads - 1) / used_threads;
for (u32 t = 0; t < used_threads; ++t)
{
u32 start = t * chunk_size;
u32 end = std::min(start + chunk_size, static_cast<u32>(gate_pairs.size()));
Expand All @@ -608,8 +628,9 @@ namespace hal
}

// Check whether a thread encountered an error
for (const auto& res : thread_results)
for (u32 idx = 0; idx < threads.size(); idx++)
{
const auto& res = thread_results.at(idx);
if (res.is_error())
{
return ERR_APPEND(res.get_error(), "Encountered error when building feature vectors");
Expand Down
2 changes: 1 addition & 1 deletion plugins/machine_learning/src/types.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ namespace hal
{
if (!g->has_data("preprocessing_information", "multi_bit_indexed_identifiers"))
{
log_error("machine_learning", "unable to find indexed identifiers for gate with ID {}", g->get_id());
log_error("machine_learning", "unable to find indexed identifiers for gate with {} ID {}", g->get_name(), g->get_id());
continue;
}

Expand Down
14 changes: 9 additions & 5 deletions src/netlist/decorators/netlist_abstraction_decorator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,9 @@ namespace hal
// gather all successors
for (Endpoint* ep_out : gate->get_fan_out_endpoints())
{
// TODO remove debug print
// std::cout << ep_out->get_pin()->get_name() << std::endl;

new_abstraction->m_successors.insert({ep_out, {}});
const auto successors = nl_trav_dec.get_next_matching_endpoints(
ep_out,
Expand Down Expand Up @@ -120,6 +123,9 @@ namespace hal
}
}

// TODO remove debug print
// std::cout << new_abstraction->m_successors.size() << std::endl;

return OK(std::move(new_abstraction));
}

Expand Down Expand Up @@ -207,7 +213,7 @@ namespace hal
std::vector<Endpoint*> successors;
for (auto* ep : gate->get_fan_out_endpoints())
{
const auto new_successors = get_predecessors(ep);
const auto new_successors = get_successors(ep);
if (new_successors.is_error())
{
return ERR_APPEND(new_successors.get_error(), "failed to get successors of gate " + gate->get_name() + " with ID " + std::to_string(gate->get_id()) + " in netlist abstraction");
Expand Down Expand Up @@ -247,10 +253,8 @@ namespace hal
}
}

// std::sort(successors.begin(), successors.end());
// successors.erase(std::unique(successors.begin(), successors.end()), successors.end());

successors = utils::to_vector(utils::to_set(successors));
std::sort(successors.begin(), successors.end());
successors.erase(std::unique(successors.begin(), successors.end()), successors.end());

return OK(successors);
}
Expand Down
86 changes: 85 additions & 1 deletion src/python_bindings/bindings/netlist_abstraction_decorator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ namespace hal
auto res = NetlistAbstraction::create(netlist, gates, include_all_netlist_gates, exit_endpoint_filter, entry_endpoint_filter);
if (res.is_ok())
{
return res.get();
return std::shared_ptr<NetlistAbstraction>(res.get());
}
else
{
Expand Down Expand Up @@ -525,6 +525,90 @@ namespace hal
:rtype: set[hal_py.Gate] or None
)");

// Bind the overload of get_next_matching_gates that starts from a list of endpoints
py_netlist_abstraction_decorator.def(
    "get_next_matching_gates",
    [](const NetlistAbstractionDecorator& self,
       const std::vector<Endpoint*>& endpoints,
       const std::function<bool(const Gate*)>& target_gate_filter,
       const PinDirection& direction,
       bool directed,
       bool continue_on_match,
       const std::function<bool(const Endpoint*, const u32)>& exit_endpoint_filter,
       const std::function<bool(const Endpoint*, const u32)>& entry_endpoint_filter) -> std::optional<std::vector<std::set<Gate*>>> {
        auto res = self.get_next_matching_gates(endpoints, target_gate_filter, direction, directed, continue_on_match, exit_endpoint_filter, entry_endpoint_filter);
        if (res.is_ok())
        {
            return res.get();
        }
        else
        {
            // Errors are logged and surfaced to Python as None instead of raising.
            log_error("python_context", "error in get_next_matching_gates: {}", res.get_error().get());
            return std::nullopt;
        }
    },
    py::arg("endpoints"),
    py::arg("target_gate_filter"),
    py::arg("direction"),
    py::arg("directed") = true,
    py::arg("continue_on_match") = false,
    py::arg("exit_endpoint_filter") = nullptr,
    py::arg("entry_endpoint_filter") = nullptr,
    R"(
        Starting from the given endpoints, traverse the netlist abstraction and return the successor/predecessor gates that satisfy the `target_gate_filter`.

        :param list[hal_py.Endpoint] endpoints: The starting endpoints.
        :param callable target_gate_filter: A filter function for the target gates.
        :param hal_py.PinDirection direction: The direction to search (`PinDirection.input` or `PinDirection.output`).
        :param bool directed: Whether to use a directed graph representation. Defaults to `True`.
        :param bool continue_on_match: Whether to continue traversal even after finding a match. Defaults to `False`.
        :param callable exit_endpoint_filter: A filter function to stop traversal on a fan-in/out endpoint. Defaults to `None`.
        :param callable entry_endpoint_filter: A filter function to stop traversal on successor/predecessor endpoints. Defaults to `None`.
        :returns: A list of sets of gates matching the filter, or `None` on error.
        :rtype: list[set[hal_py.Gate]] or None
    )");

// Bind the overload of get_next_matching_gates that starts from a list of gates
py_netlist_abstraction_decorator.def(
    "get_next_matching_gates",
    [](const NetlistAbstractionDecorator& self,
       const std::vector<Gate*>& gates,
       const std::function<bool(const Gate*)>& target_gate_filter,
       const PinDirection& direction,
       bool directed,
       bool continue_on_match,
       const std::function<bool(const Endpoint*, const u32)>& exit_endpoint_filter,
       const std::function<bool(const Endpoint*, const u32)>& entry_endpoint_filter) -> std::optional<std::vector<std::set<Gate*>>> {
        auto res = self.get_next_matching_gates(gates, target_gate_filter, direction, directed, continue_on_match, exit_endpoint_filter, entry_endpoint_filter);
        if (res.is_ok())
        {
            return res.get();
        }
        else
        {
            // Errors are logged and surfaced to Python as None instead of raising.
            log_error("python_context", "error in get_next_matching_gates: {}", res.get_error().get());
            return std::nullopt;
        }
    },
    py::arg("gates"),
    py::arg("target_gate_filter"),
    py::arg("direction"),
    py::arg("directed") = true,
    py::arg("continue_on_match") = false,
    py::arg("exit_endpoint_filter") = nullptr,
    py::arg("entry_endpoint_filter") = nullptr,
    R"(
        Starting from the given gates, traverse the netlist abstraction and return the successor/predecessor gates that satisfy the `target_gate_filter`.

        :param list[hal_py.Gate] gates: The starting gates.
        :param callable target_gate_filter: A filter function for the target gates.
        :param hal_py.PinDirection direction: The direction to search (`PinDirection.input` or `PinDirection.output`).
        :param bool directed: Whether to use a directed graph representation. Defaults to `True`.
        :param bool continue_on_match: Whether to continue traversal even after finding a match. Defaults to `False`.
        :param callable exit_endpoint_filter: A filter function to stop traversal on a fan-in/out endpoint. Defaults to `None`.
        :param callable entry_endpoint_filter: A filter function to stop traversal on successor/predecessor endpoints. Defaults to `None`.
        :returns: A list of sets of gates matching the filter, or `None` on error.
        :rtype: list[set[hal_py.Gate]] or None
    )");

// Bind the first overloaded get_next_matching_gates_until method
py_netlist_abstraction_decorator.def(
"get_next_matching_gates_until",
Expand Down
2 changes: 2 additions & 0 deletions src/python_bindings/python_bindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,8 @@ namespace hal

netlist_traversal_decorator_init(m);

netlist_abstraction_decorator_init(m);

log_init(m);

#ifndef PYBIND11_MODULE
Expand Down

0 comments on commit af446e6

Please sign in to comment.