From bc64da89f2aa96e787b77e63128f9cb6df9292b4 Mon Sep 17 00:00:00 2001 From: Simon Date: Tue, 5 Nov 2024 15:22:23 +0100 Subject: [PATCH] first version of the machine learning plugin --- plugins/.gitignore | 2 + plugins/machine_learning/.gitignore | 0 plugins/machine_learning/CMakeLists.txt | 14 + .../machine_learning/features/gate_feature.h | 123 +++ .../features/gate_pair_feature.h | 126 +++ .../machine_learning/graph_neural_network.h | 26 + .../machine_learning/labels/gate_pair_label.h | 73 ++ .../plugin_machine_learning.h | 48 ++ .../include/machine_learning/types.h | 22 + .../python/python_bindings.cpp | 763 ++++++++++++++++++ .../hal_testing/gui_annotate_netlist_graph.py | 12 + .../gui_test_pybinds_gate_feature.py | 32 + .../gui_test_pybinds_gate_pair_feature.py | 33 + .../scripts/installation/uninstall.sh | 22 + .../scripts/ml_testing/minimal_cuda_test.py | 9 + .../scripts/ml_testing/minimal_test.py | 167 ++++ .../ml_testing/minimal_test_benchmarks.py | 178 ++++ .../scripts/ml_testing/minimal_test_pairs.py | 147 ++++ .../minimal_test_pairs_benchmarks.py | 156 ++++ .../ml_testing/test_noise_resistance.py | 70 ++ .../scripts/ml_testing/test_normalization.py | 13 + .../src/features/gate_feature.cpp | 309 +++++++ .../src/features/gate_pair_feature.cpp | 391 +++++++++ .../src/graph_neural_network.cpp | 97 +++ .../src/labels/gate_pair_label.cpp | 394 +++++++++ .../src/plugin_machine_learning.cpp | 29 + 26 files changed, 3256 insertions(+) create mode 100644 plugins/machine_learning/.gitignore create mode 100644 plugins/machine_learning/CMakeLists.txt create mode 100644 plugins/machine_learning/include/machine_learning/features/gate_feature.h create mode 100644 plugins/machine_learning/include/machine_learning/features/gate_pair_feature.h create mode 100644 plugins/machine_learning/include/machine_learning/graph_neural_network.h create mode 100644 plugins/machine_learning/include/machine_learning/labels/gate_pair_label.h create mode 100644 plugins/machine_learning/include/machine_learning/plugin_machine_learning.h create mode 100644 plugins/machine_learning/include/machine_learning/types.h create mode 100644 plugins/machine_learning/python/python_bindings.cpp create mode 100644 plugins/machine_learning/scripts/hal_testing/gui_annotate_netlist_graph.py create mode 100644 plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_feature.py create mode 100644 plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_pair_feature.py create mode 100755 plugins/machine_learning/scripts/installation/uninstall.sh create mode 100644 plugins/machine_learning/scripts/ml_testing/minimal_cuda_test.py create mode 100644 plugins/machine_learning/scripts/ml_testing/minimal_test.py create mode 100644 plugins/machine_learning/scripts/ml_testing/minimal_test_benchmarks.py create mode 100644 plugins/machine_learning/scripts/ml_testing/minimal_test_pairs.py create mode 100644 plugins/machine_learning/scripts/ml_testing/minimal_test_pairs_benchmarks.py create mode 100644 plugins/machine_learning/scripts/ml_testing/test_noise_resistance.py create mode 100644 plugins/machine_learning/scripts/ml_testing/test_normalization.py create mode 100644 plugins/machine_learning/src/features/gate_feature.cpp create mode 100644 plugins/machine_learning/src/features/gate_pair_feature.cpp create mode 100644 plugins/machine_learning/src/graph_neural_network.cpp create mode 100644 plugins/machine_learning/src/labels/gate_pair_label.cpp create mode 100644 plugins/machine_learning/src/plugin_machine_learning.cpp 
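Illustrative usage sketch (not part of the diff below): a minimal example of how the gate-pair label and feature bindings introduced by this patch could be driven from HAL's Python console. It assumes `hal_py` and a loaded `netlist` are already available, as in the bundled gui_* scripts; variable names such as `pairs`, `labels`, and `feature_vecs` are placeholders chosen here for illustration.

from hal_plugins import machine_learning

gates = netlist.get_gates()

# derive gate pairs and per-pair labels from shared signal groups
lc = machine_learning.gate_pair_label.LabelContext(netlist, gates)
labeler = machine_learning.gate_pair_label.SharedSignalGroup()
pairs = labeler.calculate_gate_pairs(lc, netlist, gates)
labels = [labeler.calculate_label(lc, g_a, g_b) for g_a, g_b in pairs]

# compute a small feature set for every gate pair
fc = machine_learning.gate_pair_feature.FeatureContext(netlist)
features = [
    machine_learning.gate_pair_feature.LogicalDistance(hal_py.PinDirection.output),
    machine_learning.gate_pair_feature.SharedControlSignals(),
]
feature_vecs = machine_learning.gate_pair_feature.build_feature_vecs(fc, features, pairs)

print(len(pairs), "gate pairs labeled and featurized")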
diff --git a/plugins/.gitignore b/plugins/.gitignore index 6e8f61e8cbf..2e7fa7d8014 100644 --- a/plugins/.gitignore +++ b/plugins/.gitignore @@ -27,6 +27,8 @@ !liberty_parser/**/* !logic_evaluator* !logic_evaluator/**/* +!machine_learning* +!machine_learning/**/* !module_identification* !module_identification/**/* !netlist_preprocessing* diff --git a/plugins/machine_learning/.gitignore b/plugins/machine_learning/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/plugins/machine_learning/CMakeLists.txt b/plugins/machine_learning/CMakeLists.txt new file mode 100644 index 00000000000..13d83256421 --- /dev/null +++ b/plugins/machine_learning/CMakeLists.txt @@ -0,0 +1,14 @@ +option(PL_MACHINE_LEARNING "PL_MACHINE_LEARNING" OFF) + +if(PL_MACHINE_LEARNING OR BUILD_ALL_PLUGINS) + file(GLOB_RECURSE MACHINE_LEARNING_INC ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h) + file(GLOB_RECURSE MACHINE_LEARNING_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) + file(GLOB_RECURSE MACHINE_LEARNING_PYTHON_SRC ${CMAKE_CURRENT_SOURCE_DIR}/python/*.cpp) + + hal_add_plugin(machine_learning + SHARED + HEADER ${MACHINE_LEARNING_INC} + SOURCES ${MACHINE_LEARNING_SRC} ${MACHINE_LEARNING_PYTHON_SRC} + LINK_LIBRARIES nlohmann_json::nlohmann_json + ) +endif() diff --git a/plugins/machine_learning/include/machine_learning/features/gate_feature.h b/plugins/machine_learning/include/machine_learning/features/gate_feature.h new file mode 100644 index 00000000000..09a31cd6b75 --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/features/gate_feature.h @@ -0,0 +1,123 @@ +#include "hal_core/defines.h" +#include "hal_core/netlist/decorators/netlist_abstraction_decorator.h" + +#include +#include + +namespace hal +{ + /* Forward declaration */ + class Gate; + class Netlist; + + namespace machine_learning + { + namespace gate_feature + { + + struct FeatureContext + { + public: + FeatureContext() = delete; + FeatureContext(const Netlist* netlist) : nl(netlist) {}; + + const NetlistAbstraction& get_sequential_abstraction(); + const std::vector& get_possible_gate_type_properties(); + + const Netlist* nl; + + private: + std::optional m_seqential_abstraction; + std::optional> m_possible_gate_type_properties; + }; + + class GateFeature + { + public: + virtual std::vector calculate_feature(FeatureContext& fc, const Gate* g) const = 0; + virtual std::string get_name() const = 0; + }; + + class ConnectedGlobalIOs : public GateFeature + { + public: + ConnectedGlobalIOs() {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + }; + + class DistanceGlobalIO : public GateFeature + { + public: + DistanceGlobalIO(const PinDirection& direction) : m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + + private: + const PinDirection m_direction; + }; + + class SequentialDistanceGlobalIO : public GateFeature + { + public: + SequentialDistanceGlobalIO(const PinDirection& direction) : m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + + private: + const PinDirection m_direction; + }; + + class IODegrees : public GateFeature + { + public: + IODegrees() {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + }; + + class GateTypeOneHot : public GateFeature + { + public: + 
GateTypeOneHot() {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + }; + + class NeighboringGateTypes : public GateFeature + { + public: + NeighboringGateTypes(const u32 depth, const PinDirection& direction) : m_depth(depth), m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g) const override; + std::string get_name() const override; + + private: + const u32 m_depth; + const PinDirection m_direction; + }; + + // Feature ideas + + // number of sequential predecessors/successors + // graph metrics (centrality) + + // distance to global io in sequential only netlist + + // distance to nearest type/module (e.g. RAM, DSP) + // distance to nearest shift register + // distance to nearest bus register + + std::vector build_feature_vec(const std::vector& features, const Gate* g); + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const Gate* g); + + std::vector> build_feature_vecs(const std::vector& features, const std::vector& gates); + std::vector> build_feature_vecs(FeatureContext& fc, const std::vector& features, const std::vector& gates); + } // namespace gate_feature + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/include/machine_learning/features/gate_pair_feature.h b/plugins/machine_learning/include/machine_learning/features/gate_pair_feature.h new file mode 100644 index 00000000000..3b28d0ffafb --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/features/gate_pair_feature.h @@ -0,0 +1,126 @@ +#pragma once + +#include "hal_core/defines.h" +#include "hal_core/netlist/decorators/netlist_abstraction_decorator.h" +#include "machine_learning/types.h" + +#include +#include + +namespace hal +{ + /* Forward declaration */ + class Gate; + class Netlist; + + namespace machine_learning + { + namespace gate_pair_feature + { + + struct FeatureContext + { + public: + FeatureContext() = delete; + FeatureContext(const Netlist* netlist) : nl(netlist) {}; + + const NetlistAbstraction& get_sequential_abstraction(); + + const Netlist* nl; + + private: + std::optional m_seqential_abstraction; + }; + + class GatePairFeature + { + public: + virtual std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const = 0; + virtual std::string get_name() const = 0; + }; + + class LogicalDistance : public GatePairFeature + { + public: + LogicalDistance(const PinDirection direction) : m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const override; + std::string get_name() const override; + + private: + const PinDirection m_direction; + }; + + class SequentialDistance : public GatePairFeature + { + public: + SequentialDistance(const PinDirection direction) : m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const override; + std::string get_name() const override; + + private: + const PinDirection m_direction; + }; + + class PhysicalDistance : public GatePairFeature + { + public: + PhysicalDistance() {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const override; + std::string get_name() const override; + }; + + class SharedControlSignals : public GatePairFeature + { + public: + SharedControlSignals() {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const 
Gate* g_b) const override; + std::string get_name() const override; + }; + + class SharedSequentialNeighbors : public GatePairFeature + { + public: + SharedSequentialNeighbors(const u32 depth, const PinDirection direction) : m_depth(depth), m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const override; + std::string get_name() const override; + + private: + const u32 m_depth; + const PinDirection m_direction; + }; + + class SharedNeighbors : public GatePairFeature + { + public: + SharedNeighbors(const u32 depth, const PinDirection direction) : m_depth(depth), m_direction(direction) {}; + + std::vector calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const override; + std::string get_name() const override; + + private: + const u32 m_depth; + const PinDirection m_direction; + }; + + // feature ideas: + + // distance to each other in a sequential only netlist + // shared neighbors in a sequential only netlist + + std::vector build_feature_vec(const std::vector& features, const Gate* g_a, const Gate* g_b); + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const Gate* g_a, const Gate* g_b); + + std::vector build_feature_vec(const std::vector& features, const std::pair& gate_pair); + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const std::pair& gate_pair); + + std::vector> build_feature_vecs(const std::vector& features, const std::vector>& gate_pairs); + std::vector> + build_feature_vecs(FeatureContext& fc, const std::vector& features, const std::vector>& gate_pairs); + } // namespace gate_pair_feature + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/include/machine_learning/graph_neural_network.h b/plugins/machine_learning/include/machine_learning/graph_neural_network.h new file mode 100644 index 00000000000..d8b15758537 --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/graph_neural_network.h @@ -0,0 +1,26 @@ +#pragma once +#include "hal_core/defines.h" +#include "machine_learning/types.h" + +#include + +namespace hal +{ + class Netlist; + + namespace machine_learning + { + namespace graph + { + struct NetlistGraph + { + std::pair, std::vector> edge_list; + GraphDirection direction; + }; + + NetlistGraph construct_netlist_graph(const Netlist* nl, const std::vector& gates, const GraphDirection& dir); + + void annotate_netlist_graph(Netlist* nl, const std::vector& gates, const NetlistGraph& nlg, const std::vector>& node_features); + } // namespace graph + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/include/machine_learning/labels/gate_pair_label.h b/plugins/machine_learning/include/machine_learning/labels/gate_pair_label.h new file mode 100644 index 00000000000..b3c39a52bf0 --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/labels/gate_pair_label.h @@ -0,0 +1,73 @@ +#pragma once + +#include "hal_core/defines.h" +#include "machine_learning/types.h" + +#include +#include +#include + +namespace hal +{ + /* Forward declaration */ + class Gate; + class Netlist; + + namespace machine_learning + { + namespace gate_pair_label + { + struct MultiBitInformation + { + std::map, std::vector> word_to_gates; + std::map>> gate_to_words; + }; + + struct LabelContext + { + LabelContext() = delete; + LabelContext(const Netlist* netlist, const std::vector& gates) : nl(netlist), 
gates{gates} {}; + + const MultiBitInformation& get_multi_bit_information(); + + const Netlist* nl; + const std::vector gates; + std::optional mbi; + }; + + class GatePairLabel + { + public: + virtual std::vector> calculate_gate_pairs(LabelContext& lc, const Netlist* nl, const std::vector& gates) const = 0; + virtual std::vector calculate_label(LabelContext& lc, const Gate* g_a, const Gate* g_b) const = 0; + virtual std::vector> calculate_labels(LabelContext& lc, const std::vector>& gate_pairs) const = 0; + + virtual std::pair>, std::vector>> calculate_labels(LabelContext& lc) const = 0; + }; + + class SharedSignalGroup : public GatePairLabel + { + public: + SharedSignalGroup() {}; + + std::vector> calculate_gate_pairs(LabelContext& lc, const Netlist* nl, const std::vector& gates) const override; + std::vector calculate_label(LabelContext& lc, const Gate* g_a, const Gate* g_b) const override; + std::vector> calculate_labels(LabelContext& lc, const std::vector>& gate_pairs) const override; + + std::pair>, std::vector>> calculate_labels(LabelContext& lc) const override; + }; + + class SharedConnection : public GatePairLabel + { + public: + SharedConnection() {}; + + std::vector> calculate_gate_pairs(LabelContext& lc, const Netlist* nl, const std::vector& gates) const override; + std::vector calculate_label(LabelContext& lc, const Gate* g_a, const Gate* g_b) const override; + std::vector> calculate_labels(LabelContext& lc, const std::vector>& gate_pairs) const override; + + std::pair>, std::vector>> calculate_labels(LabelContext& lc) const override; + }; + } // namespace gate_pair_label + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/include/machine_learning/plugin_machine_learning.h b/plugins/machine_learning/include/machine_learning/plugin_machine_learning.h new file mode 100644 index 00000000000..2d96ceece2a --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/plugin_machine_learning.h @@ -0,0 +1,48 @@ +#pragma once + +#include "hal_core/plugin_system/plugin_interface_base.h" + +namespace hal +{ + class PLUGIN_API MachineLearningPlugin : public BasePluginInterface + { + public: + /** + * @brief Default constructor for `MachineLearningPlugin`. + */ + MachineLearningPlugin() = default; + + /** + * @brief Default destructor for `MachineLearningPlugin`. + */ + ~MachineLearningPlugin() = default; + + /** + * @brief Get the name of the plugin. + * + * @returns The name of the plugin. + */ + std::string get_name() const override; + + /** + * @brief Get the version of the plugin. + * + * @returns The version of the plugin. + */ + std::string get_version() const override; + + /** + * @brief Get a short description of the plugin. + * + * @returns The short description of the plugin. + */ + std::string get_description() const override; + + /** + * @brief Get the plugin dependencies. + * + * @returns A set of plugin names that this plugin depends on. 
+ */ + std::set get_dependencies() const override; + }; +} // namespace hal diff --git a/plugins/machine_learning/include/machine_learning/types.h b/plugins/machine_learning/include/machine_learning/types.h new file mode 100644 index 00000000000..b185753c1c4 --- /dev/null +++ b/plugins/machine_learning/include/machine_learning/types.h @@ -0,0 +1,22 @@ +#pragma once + +#include "hal_core/defines.h" + +#include + +namespace hal +{ + /* Forward declaration */ + class Gate; + class Netlist; + + namespace machine_learning + { + enum GraphDirection + { + directed_forward, + directed_backward, + bidirectional, + }; + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/python/python_bindings.cpp b/plugins/machine_learning/python/python_bindings.cpp new file mode 100644 index 00000000000..5c89f90cc6f --- /dev/null +++ b/plugins/machine_learning/python/python_bindings.cpp @@ -0,0 +1,763 @@ +#include "hal_core/python_bindings/python_bindings.h" + +#include "machine_learning/features/gate_feature.h" +#include "machine_learning/features/gate_pair_feature.h" +#include "machine_learning/graph_neural_network.h" +#include "machine_learning/labels/gate_pair_label.h" +#include "machine_learning/plugin_machine_learning.h" +#include "machine_learning/types.h" +#include "pybind11/operators.h" +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" +#include "pybind11/stl_bind.h" + +namespace py = pybind11; + +namespace hal +{ + +#ifdef PYBIND11_MODULE + PYBIND11_MODULE(machine_learning, m) + { + m.doc() = "Machine learning plugin for HAL."; +#else + PYBIND11_PLUGIN(machine_learning) + { + py::module m("machine_learning", "Machine learning plugin for HAL."); +#endif // ifdef PYBIND11_MODULE + + // Define submodules for namespaces + py::module py_gate_feature = m.def_submodule("gate_feature"); + py::module py_gate_pair_feature = m.def_submodule("gate_pair_feature"); + py::module py_gate_pair_label = m.def_submodule("gate_pair_label"); + py::module py_graph = m.def_submodule("graph"); + + py::class_, BasePluginInterface> py_machine_learning_plugin( + m, "MachineLearningPlugin", R"(Provides machine learning functionality as a plugin within the HAL framework.)"); + + py_machine_learning_plugin.def("get_name", &MachineLearningPlugin::get_name, R"( + Get the name of the plugin. + + :returns: The plugin name. + :rtype: str + )"); + + py_machine_learning_plugin.def("get_version", &MachineLearningPlugin::get_version, R"( + Get the plugin version. + + :returns: The plugin version. + :rtype: str + )"); + + py_machine_learning_plugin.def("get_description", &MachineLearningPlugin::get_description, R"( + Get the plugin description. + + :returns: The plugin description. + :rtype: str + )"); + + py_machine_learning_plugin.def("get_dependencies", &MachineLearningPlugin::get_dependencies, R"( + Get the plugin dependencies. + + :returns: The plugin dependencies. + :rtype: set(str) + )"); + + // machine_learning::types + py::enum_ py_graph_direction(m, "GraphDirection", R"( + Enumeration of graph traversal directions. + )"); + + py_graph_direction + .value("directed_forward", machine_learning::GraphDirection::directed_forward, R"( + Directed forward traversal. + + :type: int + )") + .value("directed_backward", machine_learning::GraphDirection::directed_backward, R"( + Directed backward traversal. + + :type: int + )") + .value("bidirectional", machine_learning::GraphDirection::bidirectional, R"( + Bidirectional traversal. 
+ + :type: int + )") + .export_values(); + + // Bindings for NetlistGraph + py::class_ py_netlist_graph(py_graph, "NetlistGraph", R"( + Represents a graph of the netlist. + )"); + + py_netlist_graph.def_readwrite("edge_list", &machine_learning::graph::NetlistGraph::edge_list, R"( + Edge list of the graph as a tuple of source and target node indices. + + :type: tuple[list[int], list[int]] + )"); + + py_netlist_graph.def_readwrite("direction", &machine_learning::graph::NetlistGraph::direction, R"( + Direction of the graph. + + :type: hal_py.machine_learning.GraphDirection + )"); + + // Bindings for construct_netlist_graph + py_graph.def("construct_netlist_graph", + &machine_learning::graph::construct_netlist_graph, + py::arg("netlist"), + py::arg("gates"), + py::arg("direction"), + R"( + Constructs a netlist graph from the given netlist and gates. + + :param hal_py.Netlist netlist: The netlist. + :param list[hal_py.Gate] gates: The gates to include in the graph. + :param hal_py.machine_learning.GraphDirection direction: The direction of the graph. + :returns: A NetlistGraph object. + :rtype: hal_py.machine_learning.graph.NetlistGraph + )"); + + // Bindings for annotate_netlist_graph + py_graph.def("annotate_netlist_graph", + &machine_learning::graph::annotate_netlist_graph, + py::arg("netlist"), + py::arg("gates"), + py::arg("netlist_graph"), + py::arg("node_features"), + R"( + Annotates the netlist graph with the given node features. + + :param hal_py.Netlist netlist: The netlist. + :param list[hal_py.Gate] gates: The gates included in the graph. + :param hal_py.machine_learning.graph.NetlistGraph netlist_graph: The netlist graph. + :param list[list[int]] node_features: The features for each node. + )"); + + // machine_learning::features::gate_feature + py::class_ py_gate_feature_context(py_gate_feature, "FeatureContext", R"( + This class holds context information for feature extraction in a netlist analysis. + Provides methods for obtaining specific feature-related data. + )"); + + py_gate_feature_context.def(py::init(), py::arg("netlist"), R"( + Initialize the FeatureContext with the given netlist. + + :param hal_py.Netlist netlist: The netlist to analyze. + )"); + + // py_gate_feature_context.def("get_sequential_abstraction", &hal::machine_learning::gate_feature::FeatureContext::get_sequential_abstraction, R"( + // Get the sequential abstraction of the netlist. + + // :returns: The sequential abstraction of the netlist. + // :rtype: hal_py.NetlistAbstraction + // )"); + + // py_gate_feature_context.def("get_possible_gate_type_properties", &hal::machine_learning::gate_feature::FeatureContext::get_possible_gate_type_properties, R"( + // Get possible gate type properties for feature extraction. + + // :returns: A list of possible gate type properties. + // :rtype: list[hal_py.GateTypeProperty] + // )"); + + py_gate_feature_context.def_readonly("nl", &hal::machine_learning::gate_feature::FeatureContext::nl, R"( + The netlist associated with this context. + :type: hal_py.Netlist + )"); + + py::class_ py_gate_feature_class(py_gate_feature, "GateFeature", R"( + Base class for gate feature extraction in machine learning analysis. + Provides an interface for calculating features and obtaining feature names. + )"); + + py_gate_feature_class.def("calculate_feature", &hal::machine_learning::gate_feature::GateFeature::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the feature vector for a specific gate in the given feature context. 
+ + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_gate_feature_class.def("get_name", &hal::machine_learning::gate_feature::GateFeature::get_name, R"( + Get the name of the feature. + + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_connected_global_ios(py_gate_feature, "ConnectedGlobalIOs", R"( + Feature class for extracting features based on globally connected IOs. + )"); + + py_connected_global_ios.def(py::init<>(), R"( + Construct a ConnectedGlobalIOs feature extractor. + )"); + + py_connected_global_ios.def("calculate_feature", &hal::machine_learning::gate_feature::ConnectedGlobalIOs::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the ConnectedGlobalIOs feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_connected_global_ios.def("get_name", &hal::machine_learning::gate_feature::ConnectedGlobalIOs::get_name, R"( + Get the name of the ConnectedGlobalIOs feature. + + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_distance_global_io(py_gate_feature, "DistanceGlobalIO", R"( + Feature class for calculating distance to global IO based on pin direction. + )"); + + py_distance_global_io.def(py::init(), py::arg("direction"), R"( + Construct a DistanceGlobalIO feature extractor with a specified pin direction. + + :param hal_py.PinDirection direction: The pin direction. + )"); + + py_distance_global_io.def("calculate_feature", &hal::machine_learning::gate_feature::DistanceGlobalIO::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the DistanceGlobalIO feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_distance_global_io.def("get_name", &hal::machine_learning::gate_feature::DistanceGlobalIO::get_name, R"( + Get the name of the DistanceGlobalIO feature. + + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_sequential_distance_global_io( + py_gate_feature, "SequentialDistanceGlobalIO", R"( + Feature class for calculating distance to global IO based on pin direction. + )"); + + py_sequential_distance_global_io.def(py::init(), py::arg("direction"), R"( + Construct a SequentialDistanceGlobalIO feature extractor with a specified pin direction. + + :param hal_py.PinDirection direction: The pin direction. + )"); + + py_sequential_distance_global_io.def("calculate_feature", &hal::machine_learning::gate_feature::SequentialDistanceGlobalIO::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the SequentialDistanceGlobalIO feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_sequential_distance_global_io.def("get_name", &hal::machine_learning::gate_feature::SequentialDistanceGlobalIO::get_name, R"( + Get the name of the SequentialDistanceGlobalIO feature. 
+ + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_io_degrees(py_gate_feature, "IODegrees", R"( + Feature class for calculating distance to global IO based on pin direction. + )"); + + py_io_degrees.def(py::init<>(), R"( + Construct a IODegrees feature extractor with a specified pin direction. + )"); + + py_io_degrees.def("calculate_feature", &hal::machine_learning::gate_feature::IODegrees::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the IODegrees feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_io_degrees.def("get_name", &hal::machine_learning::gate_feature::IODegrees::get_name, R"( + Get the name of the IODegrees feature. + + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_gate_type_one_hot(py_gate_feature, "GateTypeOneHot", R"( + Feature class for calculating distance to global IO based on pin direction. + )"); + + py_gate_type_one_hot.def(py::init<>(), R"( + Construct a GateTypeOneHot feature extractor with a specified pin direction. + )"); + + py_gate_type_one_hot.def("calculate_feature", &hal::machine_learning::gate_feature::GateTypeOneHot::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the GateTypeOneHot feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_gate_type_one_hot.def("get_name", &hal::machine_learning::gate_feature::GateTypeOneHot::get_name, R"( + Get the name of the GateTypeOneHot feature. + + :returns: The feature name. + :rtype: str + )"); + + py::class_ py_neighboring_gate_types(py_gate_feature, "NeighboringGateTypes", R"( + Feature class for calculating distance to global IO based on pin direction. + )"); + + py_neighboring_gate_types.def(py::init(), py::arg("depth"), py::arg("direction"), R"( + Construct a NeighboringGateTypes feature extractor with a specified pin direction. + + :param hal_py.PinDirection direction: The pin direction. + )"); + + py_neighboring_gate_types.def("calculate_feature", &hal::machine_learning::gate_feature::NeighboringGateTypes::calculate_feature, py::arg("fc"), py::arg("g"), R"( + Calculate the NeighboringGateTypes feature for a specific gate in the given feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_neighboring_gate_types.def("get_name", &hal::machine_learning::gate_feature::NeighboringGateTypes::get_name, R"( + Get the name of the NeighboringGateTypes feature. + + :returns: The feature name. + :rtype: str + )"); + + // Define Python bindings for build_feature_vec and build_feature_vecs functions + py_gate_feature.def("build_feature_vec", + py::overload_cast&, const hal::Gate*>(&hal::machine_learning::gate_feature::build_feature_vec), + py::arg("features"), + py::arg("g"), + R"( + Build the feature vector for a specific gate using a list of gate features. + + :param list[hal_py.machine_learning.gate_feature.GateFeature] features: The list of gate features. + :param hal_py.Gate g: The gate to calculate features for. 
+ :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_gate_feature.def("build_feature_vec", + py::overload_cast&, const hal::Gate*>( + &hal::machine_learning::gate_feature::build_feature_vec), + py::arg("fc"), + py::arg("features"), + py::arg("g"), + R"( + Build the feature vector for a specific gate using a list of gate features and a feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param list[hal_py.machine_learning.gate_feature.GateFeature] features: The list of gate features. + :param hal_py.Gate g: The gate to calculate features for. + :returns: A vector of feature values. + :rtype: list[int] + )"); + + py_gate_feature.def( + "build_feature_vecs", + py::overload_cast&, const std::vector&>(&hal::machine_learning::gate_feature::build_feature_vecs), + py::arg("features"), + py::arg("gates"), + R"( + Build feature vectors for a list of gates using a list of gate features. + + :param list[hal_py.machine_learning.gate_feature.GateFeature] features: The list of gate features. + :param list[hal_py.Gate] gates: The list of gates to calculate features for. + :returns: A list of feature vectors for each gate. + :rtype: list[list[int]] + )"); + + py_gate_feature.def( + "build_feature_vecs", + py::overload_cast&, const std::vector&>( + &hal::machine_learning::gate_feature::build_feature_vecs), + py::arg("fc"), + py::arg("features"), + py::arg("gates"), + R"( + Build feature vectors for a list of gates using a list of gate features and a feature context. + + :param hal_py.machine_learning.gate_feature.FeatureContext fc: The feature context. + :param list[hal_py.machine_learning.gate_feature.GateFeature] features: The list of gate features. + :param list[hal_py.Gate] gates: The list of gates to calculate features for. + :returns: A list of feature vectors for each gate. + :rtype: list[list[int]] + )"); + + // machine_learning::features::gate_pair_feature + // Define FeatureContext + py::class_ py_gate_pair_feature_context(py_gate_pair_feature, "FeatureContext", R"( + This class provides the feature context which includes information from a netlist to analyze gate pairs. + )"); + + py_gate_pair_feature_context.def(py::init(), py::arg("netlist"), R"( + Initialize the feature context with a given netlist. + + :param hal_py.Netlist netlist: The netlist to analyze. + )"); + + // py_gate_pair_feature_context.def("get_sequential_abstraction", &hal::machine_learning::gate_pair_feature::FeatureContext::get_sequential_abstraction, R"( + // Retrieve the sequential abstraction of the netlist. + + // :returns: The sequential abstraction of the netlist. + // :rtype: hal_py.NetlistAbstraction + // )"); + + // Define GatePairFeature + py::class_ py_gate_pair_feature_class(py_gate_pair_feature, "GatePairFeature", R"( + Base class for features that operate on pairs of gates. + )"); + + py_gate_pair_feature_class.def("calculate_feature", &hal::machine_learning::gate_pair_feature::GatePairFeature::calculate_feature, py::arg("fc"), py::arg("g_a"), py::arg("g_b"), R"( + Calculate feature vector for a pair of gates. + + :param hal_py.FeatureContext fc: The feature context. + :param hal_py.Gate g_a: The first gate. + :param hal_py.Gate g_b: The second gate. + :returns: The feature vector for the gate pair. + :rtype: list[int] + )"); + + py_gate_pair_feature_class.def("get_name", &hal::machine_learning::gate_pair_feature::GatePairFeature::get_name, R"( + Get the name of the feature. + + :returns: The name of the feature. 
+ :rtype: str + )"); + + // Define LogicalDistance + py::class_ py_logical_distance( + py_gate_pair_feature, "LogicalDistance", R"( + Feature representing logical distance between gate pairs in a specific direction. + )"); + + py_logical_distance.def(py::init(), py::arg("direction"), R"( + Initialize the LogicalDistance feature with a specified pin direction. + + :param hal_py.PinDirection direction: The direction of the pin. + )"); + + // Define SequentialDistance + py::class_ py_sequential_distance( + py_gate_pair_feature, "SequentialDistance", R"( + Feature representing sequential distance between gate pairs in a specific direction. + )"); + + py_sequential_distance.def(py::init(), py::arg("direction"), R"( + Initialize the SequentialDistance feature with a specified pin direction. + + :param hal_py.PinDirection direction: The direction of the pin. + )"); + + // Define PhysicalDistance + py::class_ py_physical_distance( + py_gate_pair_feature, "PhysicalDistance", R"( + Feature representing physical distance between gate pairs. + )"); + + py_physical_distance.def(py::init<>(), R"( + Initialize the PhysicalDistance feature. + )"); + + // Define SharedControlSignals + py::class_ py_shared_control_signals( + py_gate_pair_feature, "SharedControlSignals", R"( + Feature indicating shared control signals between gate pairs. + )"); + + py_shared_control_signals.def(py::init<>(), R"( + Initialize the SharedControlSignals feature. + )"); + + // Define SharedSequentialNeighbors + py::class_ py_shared_sequential_neighbors( + py_gate_pair_feature, "SharedSequentialNeighbors", R"( + Feature indicating shared sequential neighbors for gate pairs with a specified depth and direction. + )"); + + py_shared_sequential_neighbors.def(py::init(), py::arg("depth"), py::arg("direction"), R"( + Initialize the SharedSequentialNeighbors feature with a specified depth and direction. + + :param int depth: The depth for the analysis. + :param hal_py.PinDirection direction: The direction of the pin. + )"); + + // Define SharedNeighbors + py::class_ py_shared_neighbors( + py_gate_pair_feature, "SharedNeighbors", R"( + Feature indicating shared neighbors for gate pairs with a specified depth and direction. + )"); + + py_shared_neighbors.def(py::init(), py::arg("depth"), py::arg("direction"), R"( + Initialize the SharedNeighbors feature with a specified depth and direction. + + :param int depth: The depth for the analysis. + :param hal_py.PinDirection direction: The direction of the pin. + )"); + + // Free functions in gate_pair_feature + py_gate_pair_feature.def("build_feature_vec", + py::overload_cast&, const Gate*, const Gate*>( + &hal::machine_learning::gate_pair_feature::build_feature_vec), + py::arg("features"), + py::arg("g_a"), + py::arg("g_b"), + R"( + Build a feature vector for a pair of gates using specified features. + + :param list[hal_py.GatePairFeature] features: The features to calculate. + :param hal_py.Gate g_a: The first gate. + :param hal_py.Gate g_b: The second gate. + :returns: A feature vector. + :rtype: list[int] + )"); + + py_gate_pair_feature.def("build_feature_vec", + py::overload_cast&, + const Gate*, + const Gate*>(&hal::machine_learning::gate_pair_feature::build_feature_vec), + py::arg("fc"), + py::arg("features"), + py::arg("g_a"), + py::arg("g_b"), + R"( + Build a feature vector for a pair of gates using specified features and a feature context. + + :param hal_py.FeatureContext fc: The feature context. + :param list[hal_py.GatePairFeature] features: The features to calculate. 
+ :param hal_py.Gate g_a: The first gate. + :param hal_py.Gate g_b: The second gate. + :returns: A feature vector. + :rtype: list[int] + )"); + + py_gate_pair_feature.def("build_feature_vec", + py::overload_cast&, const std::pair&>( + &hal::machine_learning::gate_pair_feature::build_feature_vec), + py::arg("features"), + py::arg("gate_pair"), + R"( + Build a feature vector for a pair of gates from a gate pair using specified features. + + :param list[hal_py.GatePairFeature] features: The features to calculate. + :param tuple(hal_py.Gate, hal_py.Gate) gate_pair: The gate pair. + :returns: A feature vector. + :rtype: list[int] + )"); + + py_gate_pair_feature.def("build_feature_vec", + py::overload_cast&, + const std::pair&>(&hal::machine_learning::gate_pair_feature::build_feature_vec), + py::arg("fc"), + py::arg("features"), + py::arg("gate_pair"), + R"( + Build a feature vector for a pair of gates from a gate pair using specified features and a feature context. + + :param hal_py.FeatureContext fc: The feature context. + :param list[hal_py.GatePairFeature] features: The features to calculate. + :param tuple(hal_py.Gate, hal_py.Gate) gate_pair: The gate pair. + :returns: A feature vector. + :rtype: list[int] + )"); + + py_gate_pair_feature.def("build_feature_vecs", + py::overload_cast&, const std::vector>&>( + &hal::machine_learning::gate_pair_feature::build_feature_vecs), + py::arg("features"), + py::arg("gate_pairs"), + R"( + Build feature vectors for multiple gate pairs using specified features. + + :param list[hal_py.GatePairFeature] features: The features to calculate. + :param list[tuple(hal_py.Gate, hal_py.Gate)] gate_pairs: The gate pairs. + :returns: A list of feature vectors for each gate pair. + :rtype: list[list[int]] + )"); + + py_gate_pair_feature.def("build_feature_vecs", + py::overload_cast&, + const std::vector>&>(&hal::machine_learning::gate_pair_feature::build_feature_vecs), + py::arg("fc"), + py::arg("features"), + py::arg("gate_pairs"), + R"( + Build feature vectors for multiple gate pairs using specified features and a feature context. + + :param hal_py.FeatureContext fc: The feature context. + :param list[hal_py.GatePairFeature] features: The features to calculate. + :param list[tuple(hal_py.Gate, hal_py.Gate)] gate_pairs: The gate pairs. + :returns: A list of feature vectors for each gate pair. + :rtype: list[list[int]] + )"); + + // machine_learning::labels::gate_pair_label + py::class_ py_multi_bit_information(py_gate_pair_label, "MultiBitInformation", R"( + Holds information about multi-bit labels, mapping word pairs to gates and gates to word pairs. + )"); + + py_multi_bit_information.def_readwrite("word_to_gates", &hal::machine_learning::gate_pair_label::MultiBitInformation::word_to_gates, R"( + Maps word pairs to lists of gates. + + :type: dict[tuple[str, str], list[hal_py.Gate]] + )"); + + py_multi_bit_information.def_readwrite("gate_to_words", &hal::machine_learning::gate_pair_label::MultiBitInformation::gate_to_words, R"( + Maps gates to lists of word pairs. + + :type: dict[hal_py.Gate, list[tuple[str, str]]] + )"); + + py::class_ py_label_context(py_gate_pair_label, "LabelContext", R"( + Provides context for labeling gates within a netlist. + )"); + + py_label_context.def(py::init&>(), py::arg("netlist"), py::arg("gates"), R"( + Initialize a LabelContext with a netlist and a set of gates. + + :param hal_py.Netlist netlist: The netlist for analysis. + :param list[hal_py.Gate] gates: The gates for labeling. 
+ )"); + + py_label_context.def("get_multi_bit_information", &hal::machine_learning::gate_pair_label::LabelContext::get_multi_bit_information, R"( + Retrieve multi-bit information for the current context. + + :returns: Multi-bit information. + :rtype: gate_pair_label.MultiBitInformation + )"); + + py_label_context.def_readonly("nl", &hal::machine_learning::gate_pair_label::LabelContext::nl, R"( + The netlist associated with the LabelContext. + + :type: hal_py.Netlist + )"); + + py_label_context.def_readonly("gates", &hal::machine_learning::gate_pair_label::LabelContext::gates, R"( + The gates within the context. + + :type: list[hal_py.Gate] + )"); + + py_label_context.def_readonly("mbi", &hal::machine_learning::gate_pair_label::LabelContext::mbi, R"( + Multi-bit information if available. + + :type: Optional[gate_pair_label.MultiBitInformation] + )"); + + py::class_> py_gate_pair_label_class( + py_gate_pair_label, "GatePairLabel", R"( + Base class for gate-pair labeling implementations. + )"); + + // py_gate_pair_label_class.def("calculate_gate_pairs", &hal::machine_learning::gate_pair_label::GatePairLabel::calculate_gate_pairs, py::arg("lc"), py::arg("netlist"), py::arg("gates"), R"( + // Calculate gate pairs based on the provided label context, netlist, and gates. + + // :param gate_pair_label.LabelContext lc: The labeling context. + // :param hal_py.Netlist netlist: The netlist. + // :param list[hal_py.Gate] gates: The gates to analyze. + // :returns: List of gate pairs. + // :rtype: list[tuple[hal_py.Gate, hal_py.Gate]] + // )"); + + // py_gate_pair_label_class.def("calculate_label", &hal::machine_learning::gate_pair_label::GatePairLabel::calculate_label, py::arg("lc"), py::arg("g_a"), py::arg("g_b"), R"( + // Calculate the label for a given pair of gates. + + // :param gate_pair_label.LabelContext lc: The labeling context. + // :param hal_py.Gate g_a: The first gate. + // :param hal_py.Gate g_b: The second gate. + // :returns: List of labels. + // :rtype: list[int] + // )"); + + // py_gate_pair_label_class.def("calculate_labels", &hal::machine_learning::gate_pair_label::GatePairLabel::calculate_labels, py::arg("lc"), py::arg("gate_pairs"), R"( + // Calculate labels for a set of gate pairs. + + // :param gate_pair_label.LabelContext lc: The labeling context. + // :param list[tuple[hal_py.Gate, hal_py.Gate]] gate_pairs: The gate pairs. + // :returns: List of labels for each gate pair. + // :rtype: list[list[int]] + // )"); + + // py_gate_pair_label_class.def("calculate_labels", &hal::machine_learning::gate_pair_label::GatePairLabel::calculate_labels, py::arg("lc"), R"( + // Calculate labels for all gate pairs in the context. + + // :param gate_pair_label.LabelContext lc: The labeling context. + // :returns: Tuple of gate pairs and corresponding labels. + // :rtype: tuple[list[tuple[hal_py.Gate, hal_py.Gate]], list[list[int]]] + // )"); + + py::class_> + py_shared_signal_group(py_gate_pair_label, "SharedSignalGroup", R"( + Labeling strategy based on shared signals between gate pairs. + )"); + + py_shared_signal_group.def(py::init<>(), R"( + Constructs a SharedSignalGroup labeling strategy. + )"); + + py_shared_signal_group.def("calculate_gate_pairs", &hal::machine_learning::gate_pair_label::SharedSignalGroup::calculate_gate_pairs, py::arg("lc"), py::arg("netlist"), py::arg("gates"), R"( + Calculate gate pairs based on shared signals. + + :param gate_pair_label.LabelContext lc: The labeling context. + :param hal_py.Netlist netlist: The netlist. 
+ :param list[hal_py.Gate] gates: The gates to analyze. + :returns: List of gate pairs. + :rtype: list[tuple[hal_py.Gate, hal_py.Gate]] + )"); + + py_shared_signal_group.def("calculate_label", &hal::machine_learning::gate_pair_label::SharedSignalGroup::calculate_label, py::arg("lc"), py::arg("g_a"), py::arg("g_b"), R"( + Calculate the label for a given pair of gates based on shared signals. + + :param gate_pair_label.LabelContext lc: The labeling context. + :param hal_py.Gate g_a: The first gate. + :param hal_py.Gate g_b: The second gate. + :returns: List of labels. + :rtype: list[int] + )"); + + py::class_> + py_shared_connection(py_gate_pair_label, "SharedConnection", R"( + Labeling strategy based on shared connections between gate pairs. + )"); + + py_shared_connection.def(py::init<>(), R"( + Constructs a SharedConnection labeling strategy. + )"); + + py_shared_connection.def("calculate_gate_pairs", &hal::machine_learning::gate_pair_label::SharedConnection::calculate_gate_pairs, py::arg("lc"), py::arg("netlist"), py::arg("gates"), R"( + Calculate gate pairs based on shared connections. + + :param gate_pair_label.LabelContext lc: The labeling context. + :param hal_py.Netlist netlist: The netlist. + :param list[hal_py.Gate] gates: The gates to analyze. + :returns: List of gate pairs. + :rtype: list[tuple[hal_py.Gate, hal_py.Gate]] + )"); + +#ifndef PYBIND11_MODULE + return m.ptr(); +#endif // PYBIND11_MODULE + } +} // namespace hal diff --git a/plugins/machine_learning/scripts/hal_testing/gui_annotate_netlist_graph.py b/plugins/machine_learning/scripts/hal_testing/gui_annotate_netlist_graph.py new file mode 100644 index 00000000000..6274d1bffce --- /dev/null +++ b/plugins/machine_learning/scripts/hal_testing/gui_annotate_netlist_graph.py @@ -0,0 +1,12 @@ +from hal_plugins import machine_learning + +g = machine_learning.MachineLearning.Graph.test_construct_netlist_graph(netlist) +l = machine_learning.MachineLearning.Graph.test_construct_node_labels(netlist) + +print(g) +print(g.node_features) +print(g.edge_list) + +print(l) + +machine_learning.MachineLearning.Graph.annotate_netlist_graph(netlist, g) \ No newline at end of file diff --git a/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_feature.py b/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_feature.py new file mode 100644 index 00000000000..c870751d4d8 --- /dev/null +++ b/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_feature.py @@ -0,0 +1,32 @@ + +# Example of using build_feature_vec for gate_feature with all available features + +from hal_plugins import machine_learning + +# Create the feature context with the netlist +fc = machine_learning.gate_feature.FeatureContext(netlist) + +# Instantiate all available gate pair features +connected_global_ios = machine_learning.gate_feature.ConnectedGlobalIOs() +distance_global_io = machine_learning.gate_feature.DistanceGlobalIO(hal_py.PinDirection.output) +sequnetial_distance_global_io = machine_learning.gate_feature.SequentialDistanceGlobalIO(hal_py.PinDirection.output) +io_degrees = machine_learning.gate_feature.IODegrees() +gate_type_one_hot = machine_learning.gate_feature.GateTypeOneHot() +neighboring_gate_types = machine_learning.gate_feature.NeighboringGateTypes(2, hal_py.PinDirection.output) + +# Collect all features into a list +features = [ + connected_global_ios, + distance_global_io, + #sequnetial_distance_global_io, + io_degrees, + #gate_type_one_hot, + #neighboring_gate_types, +] + +gate_a = netlist.get_gate_by_id(21) + +# 
Build the feature vector for the pair of gates +feature_vector = machine_learning.gate_feature.build_feature_vec(fc, features, gate_a) + +print("Feature vector:", feature_vector) diff --git a/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_pair_feature.py b/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_pair_feature.py new file mode 100644 index 00000000000..6d76c1b1d06 --- /dev/null +++ b/plugins/machine_learning/scripts/hal_testing/gui_test_pybinds_gate_pair_feature.py @@ -0,0 +1,33 @@ +# Example of using build_feature_vec for gate_pair_feature with all available features + +from hal_plugins import machine_learning + +# Create the feature context with the netlist +fc = machine_learning.gate_pair_feature.FeatureContext(netlist) + +# Instantiate all available gate pair features +logical_distance = machine_learning.gate_pair_feature.LogicalDistance(direction=hal_py.PinDirection.output) +sequential_distance = machine_learning.gate_pair_feature.SequentialDistance(direction=hal_py.PinDirection.output) +physical_distance = machine_learning.gate_pair_feature.PhysicalDistance() +shared_control_signals = machine_learning.gate_pair_feature.SharedControlSignals() +shared_sequential_neighbors = machine_learning.gate_pair_feature.SharedSequentialNeighbors(depth=2, direction=hal_py.PinDirection.output) +shared_neighbors = machine_learning.gate_pair_feature.SharedNeighbors(depth=2, direction=hal_py.PinDirection.output) + +# Collect all features into a list +features = [ + logical_distance, + sequential_distance, + #physical_distance, + shared_control_signals, + shared_sequential_neighbors, + shared_neighbors +] + +gate_a = netlist.get_gate_by_id(21) +gate_b = netlist.get_gate_by_id(151) + + +# Build the feature vector for the pair of gates +feature_vector = machine_learning.gate_pair_feature.build_feature_vec(fc, features, gate_a, gate_b) + +print("Feature vector:", feature_vector) \ No newline at end of file diff --git a/plugins/machine_learning/scripts/installation/uninstall.sh b/plugins/machine_learning/scripts/installation/uninstall.sh new file mode 100755 index 00000000000..e6ba8dbb71d --- /dev/null +++ b/plugins/machine_learning/scripts/installation/uninstall.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# Stop on the first sign of trouble +set -e + +echo "Removing all NVIDIA drivers and CUDA packages..." + +# Remove the NVIDIA driver +sudo apt-get purge -y '*nvidia*' + +# Remove CUDA Toolkit and associated packages +sudo apt-get purge -y '*cuda*' +sudo apt-get purge -y '*cublas*' '*cufft*' '*curand*' '*cusolver*' '*cusparse*' '*npp*' '*nvjpeg*' + +# Autoremove remaining dependencies +sudo apt-get autoremove -y + +# Remove any remaining NVIDIA and CUDA directories +sudo rm -rf /usr/local/cuda* + +echo "Uninstallation complete. Please reboot the system." 
+ diff --git a/plugins/machine_learning/scripts/ml_testing/minimal_cuda_test.py b/plugins/machine_learning/scripts/ml_testing/minimal_cuda_test.py new file mode 100644 index 00000000000..884c4301eef --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/minimal_cuda_test.py @@ -0,0 +1,9 @@ +import torch + +print(torch.__version__) +print(torch.version.cuda) +print(torch.cuda.is_available()) + + +x = torch.rand(5, 3).cuda() +print(x) diff --git a/plugins/machine_learning/scripts/ml_testing/minimal_test.py b/plugins/machine_learning/scripts/ml_testing/minimal_test.py new file mode 100644 index 00000000000..2f2f76c6eb4 --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/minimal_test.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +import sys, os +import pathlib + +#some necessary configuration: user_name = os.getlogin() + +# some necessary configuration: +if user_name == "simon": + base_path = "/home/simon/projects/hal/" + benchmarks_base_path = pathlib.Path("/home/simon/projects/benchmarks") +elif user_name == "klix": + base_path = "/home/klix/projects/hal/" + benchmarks_base_path = pathlib.Path("/home/klix/projects/benchmarks") +else: + print("add base paths for user {} before executing...".format(user_name)) + exit() + +sys.path.append(base_path + "build/lib/") #this is where your hal python lib is located +os.environ["HAL_BASE_PATH"] = base_path + "build" # hal base path +import hal_py + +lm = hal_py.LogManager() +lm.remove_sink_from_default("stdout") + +#initialize HAL +hal_py.plugin_manager.load_all_plugins() + + +netlist_paths = [ + (base_path + "examples/uart/uart.v", base_path + "examples/uart/example_library.hgl"), + (base_path + "examples/fsm/fsm.v", base_path + "examples/fsm/example_library.hgl"), + #(base_path + "examples/simple_alu/simple_alu.vhdl", base_path + "examples/simple_alu/XILINX_UNISIM.hgl"), + #(base_path + "examples/toy_cipher/toy_cipher.vhd", base_path + "examples/toy_cipher/XILINX_UNISIM.hgl"), +] + +from hal_plugins import machine_learning + +import torch +import torch.nn.functional as F +from torch_geometric.nn import GCNConv, SAGEConv + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +print(device) + +def generate_netlist_graph(nl_path, gl_path): + netlist = hal_py.NetlistFactory.load_netlist(nl_path, gl_path) + + g = machine_learning.MachineLearning.Graph.test_construct_netlist_graph(netlist) + l = machine_learning.MachineLearning.Graph.test_construct_node_labels(netlist) + + # print(g) + # print(g.node_features) + # print(g.edge_list) + + # print(l) + + edge_index = torch.Tensor(g.edge_list).long().to(device) + x = torch.Tensor(g.node_features).to(device) + y = torch.Tensor(l).long().to(device) + + #torch.nn.functional.normalize(x, dim=0, out=x) + + return (edge_index, x, y) + +data_set = list() + +for nl_path, gl_path in netlist_paths: + print(nl_path) + edge_index, x, y = generate_netlist_graph(nl_path, gl_path) + + data_set.append((edge_index, x, y)) + +split = int(len(data_set) / 2) +training_set = data_set[:split] +evaluation_set = data_set[split:] + + +num_classes = max(list(max(d[2]) for d in data_set)).item() + 1 +num_features = len(data_set[0][1][0]) + +print(num_classes, num_features) + +class GNN(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = SAGEConv(num_features, 16, aggr="mean") + self.conv2 = SAGEConv(16, 16, aggr="mean") + self.lin1 = torch.nn.Linear(16, 16) + self.lin2 = torch.nn.Linear(16, num_classes) + + def forward(self, x, edge_list): + x = self.conv1(x, edge_list) + x
= F.sigmoid(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.conv2(x, edge_list) + x = F.sigmoid(x) + x = F.dropout(x, p=0.5, training=self.training) + + x = self.lin1(x) + x = F.sigmoid(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.lin2(x) + + return x + #return F.softmax(x) + +class NN(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(num_features, 16) + self.l2 = torch.nn.Linear(16, num_classes) + + def forward(self, x): + x = self.l1(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.1, training=self.training) + x = self.l2(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.1, training=self.training) + + return x + #return F.softmax(x) + +model = GNN().to(device) +#model = NN().to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + +for epoch in range(2500): + for edge_index, x, _y in training_set: + optimizer.zero_grad() + + out = model(x, edge_index) + #out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + # print(out) + # print(y) + + loss = F.cross_entropy(out, y) + loss.backward() + optimizer.step() + print("Epoch {} - Loss: {}".format(epoch, loss)) + +# evaluate model: +model.eval() + +with torch.no_grad(): + for edge_index, x, _y in evaluation_set: + out = model(x, edge_index) + #out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + # print(graph[1]) + # print(graph[2]) print(out) print(y) + + loss = F.cross_entropy(out, y) + print("Eval Loss: {}".format(loss)) + + +#unload everything hal related +hal_py.plugin_manager.unload_all_plugins() \ No newline at end of file diff --git a/plugins/machine_learning/scripts/ml_testing/minimal_test_benchmarks.py b/plugins/machine_learning/scripts/ml_testing/minimal_test_benchmarks.py new file mode 100644 index 00000000000..48e0c0d0269 --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/minimal_test_benchmarks.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +import sys, os +import pathlib + +user_name = os.getlogin() + +# some necessary configuration: +if user_name == "simon": + base_path = "/home/simon/projects/hal/" + benchmarks_base_path = pathlib.Path("/home/simon/projects/benchmarks") +elif user_name == "klix": + base_path = "/home/klix/projects/hal/" + benchmarks_base_path = pathlib.Path("/home/klix/projects/benchmarks") +else: + print("add base paths for user {} before executing...".format(user_name)) + exit() + +sys.path.append(base_path + "build/lib/") #this is where your hal python lib is located +os.environ["HAL_BASE_PATH"] = base_path + "build" # hal base path +import hal_py + +lm = hal_py.LogManager() +lm.remove_sink_from_default("stdout") + +#initialize HAL +hal_py.plugin_manager.load_all_plugins() + + +netlist_base_paths = benchmarks_base_path / "netlists_preprocessed/yosys/NangateOpenCellLibrary_functional" + +netlist_paths = list() +for netlist_path in netlist_base_paths.glob("**/*.hal"): + # netlist_base_path = netlist_path.parent + # netlist_information_path = netlist_base_path / "netlist_information.json" + # netlist_information = json.load(json_file) + + netlist_paths.append((netlist_path, benchmarks_base_path / "gate_libraries" / "NangateOpenCellLibrary_functional.hgl")) + +from hal_plugins import machine_learning + +import torch +import torch.nn.functional as F +from torch_geometric.nn import GCNConv, SAGEConv + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +print(device) + +def generate_netlist_graph(nl_path, gl_path): + print(nl_path) + netlist =
hal_py.NetlistFactory.load_netlist(nl_path, gl_path) + + g = machine_learning.MachineLearning.Graph.test_construct_netlist_graph(netlist) + l = machine_learning.MachineLearning.Graph.test_construct_node_labels(netlist) + + # print(g) + # print(g.node_features) + # print(g.edge_list) + + # print(l) + + edge_index = torch.Tensor(g.edge_list).long().to(device) + x = torch.Tensor(g.node_features).float().to(device) + y = torch.Tensor(l).long().to(device) + + return (edge_index, x, y) + +data_set = list() + +if pathlib.Path("/home/simon/projects/hal/plugins/machine_learning/data/netlist_graphs.pt").exists(): + data_set = torch.load("/home/simon/projects/hal/plugins/machine_learning/data/netlist_graphs.pt") + print("Loaded dataset") +else: + for nl_path, gl_path in netlist_paths[:1]: + #print(nl_path) + edge_index, x, y = generate_netlist_graph(nl_path, gl_path) + + data_set.append((edge_index, x, y)) + + torch.save(data_set, "/home/simon/projects/hal/plugins/machine_learning/data/netlist_graphs_benchmarks.pt") + print("Saved dataset") + +split = int(len(data_set) / 2) +training_set = data_set[:split] +evaluation_set = data_set[split:] + + +#num_classes = max(list(max(d[2]) for d in data_set)).item() + 1 +num_classes = 2 +num_features = len(data_set[0][1][0]) + +print(num_classes, num_features) + +class GNN(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = SAGEConv(num_features, 64, aggr="mean") + self.conv2 = SAGEConv(64, 64, aggr="mean") + self.lin1 = torch.nn.Linear(64, 64) + self.lin2 = torch.nn.Linear(64, num_classes) + + def forward(self, x, edge_list): + x = self.conv1(x, edge_list) + x = F.sigmoid(x) + #x = F.dropout(x, p=0.5, training=self.training) + x = self.conv2(x, edge_list) + x = F.sigmoid(x) + x = F.dropout(x, p=0.5, training=self.training) + + x = self.lin1(x) + x = F.sigmoid(x) + #x = F.dropout(x, p=0.5, training=self.training) + x= self.lin2(x) + + #return x + return F.softmax(x, dim=1) + +class NN(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(num_features, 64) + self.l2 = torch.nn.Linear(64, num_classes) + + def forward(self, x): + x = self.l1(x) + x = F.relu6(x) + #x = F.dropout(x, p=0.1, training=self.training) + x = self.l2(x) + x = F.relu6(x) + #x = F.dropout(x, p=0.1, training=self.training) + + #return x + return F.softmax(x) + +model = GNN().to(device) +#model = NN().to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + +for epoch in range(250): + for edge_index, x, _y in training_set: + optimizer.zero_grad() + + out = model(x, edge_index) + #out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + print(out) + print(y) + + loss = F.cross_entropy(out, y) + loss.backward() + optimizer.step() + + if (epoch % 50) == 0 or epoch < 20: + print("Epoch {} - Loss: {}".format(epoch, loss)) + +# evaluate model: +model.eval() + +with torch.no_grad(): + for edge_index, x, _y in evaluation_set: + out = model(x, edge_index) + #out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + # print(graph[1]) + # print(graph[2]) + # print(out) + # print(y) + + loss = F.cross_entropy(out, y) + print("Eval Loss: {}".format(loss)) + + +#unload everything hal related +hal_py.plugin_manager.unload_all_plugins() \ No newline at end of file diff --git a/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs.py b/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs.py new file mode 100644 index 00000000000..0029e25d7b2 --- /dev/null +++ 
b/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +import sys, os +import pathlib + +#some necessary configuration: +base_path = "/home/simon/projects/hal/" + +sys.path.append(base_path + "build/lib/") #this is where your hal python lib is located +os.environ["HAL_BASE_PATH"] = base_path + "build" # hal base path +import hal_py + +lm = hal_py.LogManager() +lm.remove_sink_from_default("stdout") + +#initialize HAL +hal_py.plugin_manager.load_all_plugins() + +netlist_paths = [ + (base_path + "examples/uart/uart.v", base_path + "examples/uart/example_library.hgl"), + (base_path + "examples/toy_cipher/toy_cipher.vhd", base_path + "examples/toy_cipher/XILINX_UNISIM.hgl"), + (base_path + "examples/fsm/fsm.v", base_path + "examples/fsm/example_library.hgl"), + (base_path + "examples/simple_alu/simple_alu.vhdl", base_path + "examples/simple_alu/XILINX_UNISIM.hgl"), +] + +from hal_plugins import machine_learning + +import torch +import torch.nn.functional as F +from torch_geometric.nn import GCNConv, SAGEConv + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +print(device) + +data_set = list() + +if pathlib.Path("/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs.pt").exists(): + data_set = torch.load("/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs.pt") + print("Loaded dataset") +else: + for nl_path, gl_path in netlist_paths: + netlist = hal_py.NetlistFactory.load_netlist(nl_path, gl_path) + pairs, labels = machine_learning.MachineLearning.GatePairLabel.test_build_labels(netlist) + features = machine_learning.MachineLearning.GatePairFeature.test_build_feature_vec(netlist, pairs) + + #print(pairs) + #print(features) + #print(labels) + + + # print("------------------ PAIRS -----------------------------") + # for g_a, g_b in pairs: + # print("A: {} B: {}".format(g_a.id, g_b.id)) + # print("------------------------------------------------------") + + x = torch.Tensor(features).float().to(device) + y = torch.Tensor(labels).long().to(device) + + data_set.append((x, y)) + + + #torch.save(data_set, "/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs.pt") + print("Saved dataset") + + +split = int(len(data_set) / 2) +training_set = data_set[:split] +evaluation_set = data_set[split:] + +#num_classes = max(list(max(d[2]) for d in data_set)).item() + 1 +num_classes = 2 +num_features = len(data_set[0][0][0]) + +print("Num classes: {}".format(num_classes)) +print("Num features: {}".format(num_features)) + +class NN(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(num_features, 128) + self.l2 = torch.nn.Linear(128, 128) + self.l3 = torch.nn.Linear(128, 128) + self.l4 = torch.nn.Linear(128, 128) + self.l5 = torch.nn.Linear(128, num_classes) + + def forward(self, x): + x = self.l1(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l2(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l3(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l4(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l5(x) + # x = F.leaky_relu(x) + # x = F.dropout(x, p=0.5, training=self.training) + + return x + #return F.log_softmax(x) + +model = NN().to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) + +for epoch in range(200): + for x, _y in training_set: + optimizer.zero_grad() + + out = model(x) + + y = 
torch.eye(num_classes).to(device)[_y].squeeze() + + # print(x) + # print(out) + # print(y) + + loss = F.mse_loss(out, y) + loss.backward() + optimizer.step() + + print("Epoch {} - Loss: {}".format(epoch, loss)) + +# evaluate model: +model.eval() + +with torch.no_grad(): + for x, _y in evaluation_set: + out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + torch.set_printoptions(profile="full") + print(x) + print(y) + torch.set_printoptions(profile="default") # reset + # print(out) + + loss = F.mse_loss(out, y) + print("Eval Loss: {}".format(loss)) + + +#unload everything hal related +hal_py.plugin_manager.unload_all_plugins() \ No newline at end of file diff --git a/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs_benchmarks.py b/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs_benchmarks.py new file mode 100644 index 00000000000..ee6fcf7f36e --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/minimal_test_pairs_benchmarks.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 +import sys, os +import pathlib + +#some necessary configuration: +base_path = "/home/simon/projects/hal/" + +sys.path.append(base_path + "build/lib/") #this is where your hal python lib is located +os.environ["HAL_BASE_PATH"] = base_path + "build" # hal base path +import hal_py + +lm = hal_py.LogManager() +lm.remove_sink_from_default("stdout") + +#initialize HAL +hal_py.plugin_manager.load_all_plugins() + +benchmarks_base_path = pathlib.Path("/home/simon/projects/benchmarks") +netlist_base_paths = benchmarks_base_path / "netlists_preprocessed/yosys/NangateOpenCellLibrary_functional" + +netlist_paths = list() +for netlist_path in netlist_base_paths.glob("**/*.hal"): + # netlist_base_path = netlist_path.parent + # netlist_information_path = netlist_base_path / "netlist_information.json" + # netlist_information = json.load(json_file) + + netlist_paths.append((netlist_path, benchmarks_base_path / "gate_libraries" / "NangateOpenCellLibrary_functional.hgl")) + +from hal_plugins import machine_learning + +import torch +import torch.nn.functional as F + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +print(device) + +data_set = list() + +if pathlib.Path("/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs.pt").exists(): + data_set = torch.load("/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs.pt") + print("Loaded dataset") +else: + total_pairs = 0 + for nl_path, gl_path in netlist_paths[:2]: + #print(nl_path) + + netlist = hal_py.NetlistFactory.load_netlist(nl_path, gl_path) + pairs, labels = machine_learning.MachineLearning.GatePairLabel.test_build_labels(netlist) + + total_pairs += len(pairs) + print("Found {} pairs!".format(len(pairs))) + + features = machine_learning.MachineLearning.GatePairFeature.test_build_feature_vec(netlist, pairs) + + #print(pairs) + #print(features) + #print(labels) + + + # print("------------------ PAIRS -----------------------------") + # for g_a, g_b in pairs: + # print("A: {} B: {}".format(g_a.id, g_b.id)) + # print("------------------------------------------------------") + + x = torch.Tensor(features).float() + y = torch.Tensor(labels).long() + data_set.append((x, y)) + + # torch.save(data_set, "/home/simon/projects/hal/plugins/machine_learning/data/gate_pairs_benchmarks.pt") + print("Found a total of {:.2} Billion pairs!".format(total_pairs / 1_000_000_000)) + print("Saved dataset") + + +split = int((len(data_set) + 1) / 2) +training_set = data_set[:split] +evaluation_set = 
data_set[split:] + +#num_classes = max(list(max(d[2]) for d in data_set)).item() + 1 +num_classes = 2 +num_features = len(data_set[0][0][0]) + +print("Num classes: {}".format(num_classes)) +print("Num features: {}".format(num_features)) + +class NN(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(num_features, 128) + self.l2 = torch.nn.Linear(128, 128) + self.l3 = torch.nn.Linear(128, 128) + self.l4 = torch.nn.Linear(128, 128) + self.l5 = torch.nn.Linear(128, num_classes) + + def forward(self, x): + x = self.l1(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l2(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l3(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l4(x) + x = F.leaky_relu(x) + x = F.dropout(x, p=0.5, training=self.training) + x = self.l5(x) + # x = F.leaky_relu(x) + # x = F.dropout(x, p=0.5, training=self.training) + + return x + #return F.log_softmax(x) + +model = NN().to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) + +for epoch in range(2000): + for x, _y in training_set: + optimizer.zero_grad() + + out = model(x.to(device)) + + y = torch.eye(num_classes).to(device)[_y.to(device)].squeeze() + + # print(x) + # print(out) + # print(y) + + loss = F.mse_loss(out, y) + loss.backward() + optimizer.step() + + print("Epoch {} - Loss: {}".format(epoch, loss)) + +# evaluate model: +model.eval() + +with torch.no_grad(): + for x, _y in evaluation_set: + out = model(x) + + y = torch.eye(num_classes).to(device)[_y].squeeze() + + torch.set_printoptions(profile="full") + print(x) + print(y) + torch.set_printoptions(profile="default") # reset + # print(out) + + loss = F.mse_loss(out, y) + print("Eval Loss: {}".format(loss)) + + +#unload everything hal related +hal_py.plugin_manager.unload_all_plugins() \ No newline at end of file diff --git a/plugins/machine_learning/scripts/ml_testing/test_noise_resistance.py b/plugins/machine_learning/scripts/ml_testing/test_noise_resistance.py new file mode 100644 index 00000000000..a3b98f388da --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/test_noise_resistance.py @@ -0,0 +1,70 @@ +import torch + +samples = 128 +noise_dimension = 1024 + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + +a = torch.randint(2, (samples,), device=device) +b = torch.randint(2, (samples,), device=device) + +c = a & b + +#n = torch.randint(2, (samples, noise_dimension)) +n = torch.randint(2, (samples, noise_dimension), device=device) + +x = torch.stack([a, b], dim=1, out=None) +x = torch.cat((n, x), 1) +y = c.unsqueeze(1) + +# print(a) +# print(b) +# print(c) +# print(n) +# print(x) +# print(y) + +num_classes = 1 +num_features = 2 + noise_dimension + +h = 4 + +import torch.nn.functional as F + + +class NN(torch.nn.Module): + def __init__(self): + super().__init__() + self.l1 = torch.nn.Linear(num_features, h) + self.l2 = torch.nn.Linear(h, num_classes) + #self.l3 = torch.nn.Linear(h, num_classes) + + def forward(self, x): + x = self.l1(x) + x = F.relu6(x) + #x = F.dropout(x, p=0.1, training=self.training) + x = self.l2(x) + x = F.relu6(x) + #x = F.dropout(x, p=0.1, training=self.training) + # x = self.l3(x) + # x = F.relu(x) + + return x + +model = NN().to(device) +optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + +for epoch in range(10000): + optimizer.zero_grad() + + out = model(x.float()) + + # print(out) + # print(y) + + loss = F.mse_loss(out, 
y.float()) + loss.backward() + optimizer.step() + + if (epoch % 50) == 0: + print("Epoch {} - Loss: {}".format(epoch, loss)) \ No newline at end of file diff --git a/plugins/machine_learning/scripts/ml_testing/test_normalization.py b/plugins/machine_learning/scripts/ml_testing/test_normalization.py new file mode 100644 index 00000000000..80550a64066 --- /dev/null +++ b/plugins/machine_learning/scripts/ml_testing/test_normalization.py @@ -0,0 +1,13 @@ +import torch + +t = torch.tensor([1, 1, 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 8], dtype=torch.float32) + +t_mean = torch.mean(t) +t_var = torch.var(t) + +print(t_mean) +print(t_var) + +t_norm = (t - t_mean) / torch.sqrt(t_var) + +print(t_norm) \ No newline at end of file diff --git a/plugins/machine_learning/src/features/gate_feature.cpp b/plugins/machine_learning/src/features/gate_feature.cpp new file mode 100644 index 00000000000..8372e10dbf3 --- /dev/null +++ b/plugins/machine_learning/src/features/gate_feature.cpp @@ -0,0 +1,309 @@ +#include "machine_learning/features/gate_feature.h" + +#include "hal_core/defines.h" +#include "hal_core/netlist/decorators/netlist_abstraction_decorator.h" +#include "hal_core/netlist/decorators/netlist_traversal_decorator.h" +#include "hal_core/netlist/gate.h" +#include "hal_core/netlist/net.h" +#include "hal_core/netlist/netlist.h" + +#include + +#define MAX_DISTANCE 255 + +namespace hal +{ + namespace machine_learning + { + namespace gate_feature + { + const NetlistAbstraction& FeatureContext::get_sequential_abstraction() + { + if (!m_seqential_abstraction.has_value()) + { + const auto seq_gates = nl->get_gates([](const auto* g) { return g->get_type()->has_property(GateTypeProperty::sequential); }); + + const std::vector forbidden_pins = { + PinType::clock, /*PinType::done, PinType::error, PinType::error_detection,*/ /*PinType::none,*/ PinType::ground, PinType::power /*, PinType::status*/}; + + const auto endpoint_filter = [forbidden_pins](const auto* ep, const auto& _d) { + UNUSED(_d); + return std::find(forbidden_pins.begin(), forbidden_pins.end(), ep->get_pin()->get_type()) == forbidden_pins.end(); + }; + + m_seqential_abstraction = NetlistAbstraction(nl, seq_gates, true, endpoint_filter, endpoint_filter); + + // TODO remove debug print + std::cout << "Built abstraction" << std::endl; + } + + return m_seqential_abstraction.value(); + } + + const std::vector& FeatureContext::get_possible_gate_type_properties() + { + if (!m_possible_gate_type_properties.has_value()) + { + std::set properties; + + // for (const auto& [_name, gt] : nl->get_gate_library()->get_gate_types()) + // { + // properties.insert(gt->get_properties().begin(), gt->get_properties().end()); + // } + + for (auto& [gtp, _name] : EnumStrings::data) + { + UNUSED(_name); + properties.insert(gtp); + } + + auto properties_vec = utils::to_vector(properties); + // sort alphabetically + std::sort(properties_vec.begin(), properties_vec.end(), [](const auto& a, const auto& b) { return enum_to_string(a) < enum_to_string(b); }); + m_possible_gate_type_properties = properties_vec; + } + + return m_possible_gate_type_properties.value(); + } + + std::vector ConnectedGlobalIOs::calculate_feature(FeatureContext& fc, const Gate* g) const + { + UNUSED(fc); + + u32 connected_global_inputs = 0; + u32 connected_global_outputs = 0; + + for (const auto& in : g->get_fan_in_nets()) + { + if (in->is_global_input_net()) + { + connected_global_inputs += 1; + } + } + + for (const auto& out : g->get_fan_out_nets()) + { + if (out->is_global_output_net()) + { + 
connected_global_outputs += 1; + } + } + + return {connected_global_inputs, connected_global_outputs}; + } + + std::string ConnectedGlobalIOs::get_name() const + { + return "ConnectedGlobalIOs"; + } + + std::vector DistanceGlobalIO::calculate_feature(FeatureContext& fc, const Gate* g) const + { + // necessary workaround to please compiler + const auto& direction = m_direction; + const auto distance = NetlistTraversalDecorator(*fc.nl).get_shortest_path_distance( + g, [direction](const auto* ep) { return (direction == PinDirection::output) ? ep->get_net()->is_global_output_net() : ep->get_net()->is_global_input_net(); }, m_direction); + + if (distance.is_error()) + { + log_error("machine_learning", "{}", distance.get_error().get()); + // fall back to the maximum distance if the query failed + return {MAX_DISTANCE}; + } + + if (!distance.get().has_value()) + { + return {MAX_DISTANCE}; + } + + return {std::min(distance.get().value(), u32(MAX_DISTANCE))}; + } + + std::string DistanceGlobalIO::get_name() const + { + return "DistanceGlobalIO"; + } + + std::vector SequentialDistanceGlobalIO::calculate_feature(FeatureContext& fc, const Gate* g) const + { + // necessary workaround to please compiler + const auto& direction = m_direction; + const auto distance = NetlistAbstractionDecorator(fc.get_sequential_abstraction()) + .get_shortest_path_distance( + g, + [direction](const auto* ep, const auto& nla) { + const auto global_io_connections = (direction == PinDirection::output) ? nla.get_global_output_successors(ep) : nla.get_global_input_predecessors(ep); + return !global_io_connections.empty(); + }, + m_direction); + + if (distance.is_error()) + { + log_error("machine_learning", "{}", distance.get_error().get()); + // fall back to the maximum distance if the query failed + return {MAX_DISTANCE}; + } + + if (!distance.get().has_value()) + { + return {MAX_DISTANCE}; + } + + return {std::min(distance.get().value(), u32(MAX_DISTANCE))}; + } + + std::string SequentialDistanceGlobalIO::get_name() const + { + return "SequentialDistanceGlobalIO"; + } + + std::vector IODegrees::calculate_feature(FeatureContext& fc, const Gate* g) const + { + UNUSED(fc); + + u32 input_io_degree = g->get_fan_in_nets().size(); + u32 output_io_degree = g->get_fan_out_nets().size(); + + return {input_io_degree, output_io_degree}; + } + + std::string IODegrees::get_name() const + { + return "IODegrees"; + } + + std::vector GateTypeOneHot::calculate_feature(FeatureContext& fc, const Gate* g) const + { + const auto& all_properties = fc.get_possible_gate_type_properties(); + + // TODO remove debug print + // std::cout << "Got following gate type properties: " << std::endl; + // for (const auto& gtp : all_properties) + // { + // std::cout << enum_to_string(gtp) << std::endl; + // } + + std::vector feature = std::vector(all_properties.size(), 0); + + for (const auto& gtp : g->get_type()->get_properties()) + { + const u32 index = std::distance(all_properties.begin(), std::find(all_properties.begin(), all_properties.end(), gtp)); + feature.at(index) += 1; + } + + return feature; + } + + std::string GateTypeOneHot::get_name() const + { + return "GateTypeOneHot"; + } + + std::vector NeighboringGateTypes::calculate_feature(FeatureContext& fc, const Gate* g) const + { + const auto& all_properties = fc.get_possible_gate_type_properties(); + + std::vector feature = std::vector(all_properties.size(), 0); + + std::set neighborhood; + + // fix to make compiler happy + const auto depth = m_depth; + + if (m_direction == PinDirection::input || m_direction == PinDirection::inout) + { + const auto in_gates = NetlistTraversalDecorator(*fc.nl).get_next_matching_gates_until( + g, + false, + [](const auto* g) { return true; }, + true, 
+ [depth](const auto* _ep, const auto current_depth) { + UNUSED(_ep); + return current_depth <= depth; + }, + nullptr); + + if (in_gates.is_error()) + { + log_error("machine_learning", "cannot build NeighboringGateTypes feature: {}", in_gates.get_error().get()); + return feature; + } + + neighborhood.insert(in_gates.get().begin(), in_gates.get().end()); + } + + if (m_direction == PinDirection::output || m_direction == PinDirection::inout) + { + const auto in_gates = NetlistTraversalDecorator(*fc.nl).get_next_matching_gates_until( + g, + true, + [](const auto* g) { return true; }, + true, + [depth](const auto* _ep, const auto current_depth) { + UNUSED(_ep); + return current_depth <= depth; + }, + nullptr); + + if (in_gates.is_error()) + { + log_error("machine_learning", "cannot build NeighboringGateTypes feature: {}", in_gates.get_error().get()); + return feature; + } + + neighborhood.insert(in_gates.get().begin(), in_gates.get().end()); + } + + for (const auto& gn : neighborhood) + { + for (const auto& gtp : gn->get_type()->get_properties()) + { + const u32 index = std::distance(all_properties.begin(), std::find(all_properties.begin(), all_properties.end(), gtp)); + feature.at(index) += 1; + } + } + + return feature; + } + + std::string NeighboringGateTypes::get_name() const + { + return "NeighboringGateTypes"; + } + + std::vector build_feature_vec(const std::vector& features, const Gate* g) + { + FeatureContext fc(g->get_netlist()); + return build_feature_vec(fc, features, g); + } + + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const Gate* g) + { + std::vector feature_vec; + + for (const auto& gf : features) + { + const auto new_features = gf->calculate_feature(fc, g); + feature_vec.insert(feature_vec.end(), new_features.begin(), new_features.end()); + } + + return feature_vec; + } + + std::vector> build_feature_vecs(const std::vector& features, const std::vector& gates) + { + if (gates.empty()) + { + return {}; + } + + FeatureContext fc(gates.front()->get_netlist()); + return build_feature_vecs(fc, features, gates); + } + + std::vector> build_feature_vecs(FeatureContext& fc, const std::vector& features, const std::vector& gates) + { + std::vector> feature_vecs; + + for (const auto& g : gates) + { + feature_vecs.push_back(build_feature_vec(fc, features, g)); + } + + return feature_vecs; + } + } // namespace gate_feature + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/src/features/gate_pair_feature.cpp b/plugins/machine_learning/src/features/gate_pair_feature.cpp new file mode 100644 index 00000000000..050a07d7c0f --- /dev/null +++ b/plugins/machine_learning/src/features/gate_pair_feature.cpp @@ -0,0 +1,391 @@ +#include "machine_learning/features/gate_pair_feature.h" + +#include "hal_core/netlist/decorators/netlist_abstraction_decorator.h" +#include "hal_core/netlist/decorators/netlist_traversal_decorator.h" +#include "hal_core/netlist/gate.h" +#include "hal_core/netlist/netlist.h" +#include "hal_core/utilities/log.h" + +#define MAX_DISTANCE 255 + +namespace hal +{ + namespace machine_learning + { + namespace gate_pair_feature + { + const NetlistAbstraction& FeatureContext::get_sequential_abstraction() + { + if (!m_seqential_abstraction.has_value()) + { + const auto seq_gates = nl->get_gates([](const auto* g) { return g->get_type()->has_property(GateTypeProperty::sequential); }); + + const std::vector forbidden_pins = { + PinType::clock, /*PinType::done, PinType::error, PinType::error_detection,*/ /*PinType::none,*/ 
PinType::ground, PinType::power /*, PinType::status*/}; + + const auto endpoint_filter = [forbidden_pins](const auto* ep, const auto& _d) { + UNUSED(_d); + return std::find(forbidden_pins.begin(), forbidden_pins.end(), ep->get_pin()->get_type()) == forbidden_pins.end(); + }; + + m_seqential_abstraction = NetlistAbstraction(nl, seq_gates, true, endpoint_filter, endpoint_filter); + } + + return m_seqential_abstraction.value(); + } + + std::vector LogicalDistance::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + if (g_a == g_b) + { + return {0}; + } + + const auto res = NetlistTraversalDecorator(*fc.nl).get_shortest_path_distance(g_a, g_b, m_direction); + if (res.is_error()) + { + log_error("machine_learning", "failed to calculate shortest path feature: {}", res.get_error().get()); + return {}; + } + + const auto shortest_path = res.get(); + + if (!shortest_path.has_value()) + { + return {MAX_DISTANCE}; + } + + if (shortest_path.value() < 1) + { + log_error("machine_learning", "Found shortest path with no components, this is unexpected!"); + } + + const u32 distance = shortest_path.value(); + return {std::min(distance, u32(MAX_DISTANCE))}; + }; + + std::string LogicalDistance::get_name() const + { + return "LogicalDistance"; + } + + std::vector SequentialDistance::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + if (g_a == g_b) + { + return {0}; + } + + const auto res = NetlistAbstractionDecorator(fc.get_sequential_abstraction()).get_shortest_path_distance(g_a, g_b, m_direction); + if (res.is_error()) + { + log_error("machine_learning", "failed to calculate shortest path feature: {}", res.get_error().get()); + return {}; + } + + const auto shortest_path = res.get(); + + if (!shortest_path.has_value()) + { + return {MAX_DISTANCE}; + } + + if (shortest_path.value() <= 1) + { + log_error("machine_learning", "Found shortest path with only one component, this is unexpected!"); + } + + const u32 distance = shortest_path.value(); + return {std::min(distance, u32(MAX_DISTANCE))}; + }; + + std::string SequentialDistance::get_name() const + { + return "SequentialDistance"; + } + + std::vector PhysicalDistance::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + log_error("machine_learning", "Physical distance currently not implemented as gate pair feature."); + + return {}; + }; + + std::string PhysicalDistance::get_name() const + { + return "PhysicalDistance"; + } + + std::vector SharedControlSignals::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + static const std::vector ctrl_pin_types = { + PinType::clock, + PinType::enable, + PinType::reset, + PinType::set, + }; + + std::set nets_a; + std::set nets_b; + + for (const auto& t : ctrl_pin_types) + { + for (const auto& pin : g_a->get_type()->get_pins([t](const GatePin* p) { return p->get_type() == t; })) + { + nets_a.insert(g_a->get_fan_in_net(pin)); + } + + for (const auto& pin : g_b->get_type()->get_pins([t](const GatePin* p) { return p->get_type() == t; })) + { + nets_b.insert(g_b->get_fan_in_net(pin)); + } + } + + std::set shared; + std::set_intersection(nets_a.begin(), nets_a.end(), nets_b.begin(), nets_b.end(), std::inserter(shared, shared.begin())); + + return {u32(shared.size())}; + }; + + std::string SharedControlSignals::get_name() const + { + return "SharedControlSignals"; + } + + std::vector SharedSequentialNeighbors::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + // 
TODO add caching + std::unordered_map>* cache; + + const auto get_n_next_sequential_gates = [&fc, &cache](const Gate* g, const bool direction, const u32 depth) -> Result> { + std::set total_neighbors; + + std::vector current_neighbors = {g}; + std::vector next_neighbors; + + for (u32 i = 0; i < depth; i++) + { + u32 prev_size = total_neighbors.size(); + + for (const auto& g : current_neighbors) + { + const auto& res = NetlistTraversalDecorator(*fc.nl).get_next_sequential_gates(g, direction); + if (res.is_error()) + { + return res; + } + total_neighbors.insert(res.get().begin(), res.get().end()); + next_neighbors.insert(next_neighbors.end(), res.get().begin(), res.get().end()); + } + + if (prev_size == total_neighbors.size()) + { + break; + } + + current_neighbors = next_neighbors; + next_neighbors.clear(); + } + + return OK(total_neighbors); + }; + + std::set neighbors_a = g_a->get_type()->has_property(GateTypeProperty::sequential) ? std::set{g_a} : std::set{}; + std::set neighbors_b = g_b->get_type()->has_property(GateTypeProperty::sequential) ? std::set{g_b} : std::set{}; + + if (m_direction == PinDirection::output) + { + const auto res_a = get_n_next_sequential_gates(g_a, true, m_depth); + if (res_a.is_error()) + { + log_error("machine_learning", "cannot calculate feature {}: {}", this->get_name(), res_a.get_error().get()); + return {}; + } + + for (const auto g_n : res_a.get()) + { + neighbors_a.insert(g_n); + } + + const auto res_b = get_n_next_sequential_gates(g_b, true, m_depth); + if (res_b.is_error()) + { + log_error("machine_learning", "cannot calculate feature {}: {}", this->get_name(), res_b.get_error().get()); + return {}; + } + + for (const auto g_n : res_b.get()) + { + neighbors_b.insert(g_n); + } + } + + if (m_direction == PinDirection::input) + { + const auto res_a = get_n_next_sequential_gates(g_a, false, m_depth); + if (res_a.is_error()) + { + log_error("machine_learning", "cannot calculate feature {}: {}", this->get_name(), res_a.get_error().get()); + return {}; + } + + for (const auto g_n : res_a.get()) + { + neighbors_a.insert(g_n); + } + + const auto res_b = get_n_next_sequential_gates(g_b, false, m_depth); + if (res_b.is_error()) + { + log_error("machine_learning", "cannot calculate feature {}: {}", this->get_name(), res_b.get_error().get()); + return {}; + } + + for (const auto g_n : res_b.get()) + { + neighbors_b.insert(g_n); + } + } + + if (m_direction == PinDirection::inout) + { + // NOTE this is either trivial by just combining both directions or more complex by building a real undirected graph (predecessors of a successor would also be neighbors even though the direction switches) + log_error("machine_learning", "SharedSequentialNeighbors bidirectional feature not implemented yet"); + } + + std::set shared; + std::set_intersection(neighbors_a.begin(), neighbors_a.end(), neighbors_b.begin(), neighbors_b.end(), std::inserter(shared, shared.begin())); + + return {u32(shared.size())}; + }; + + std::string SharedSequentialNeighbors::get_name() const + { + return "SharedSequentialNeighbors"; + } + + std::vector SharedNeighbors::calculate_feature(FeatureContext& fc, const Gate* g_a, const Gate* g_b) const + { + std::set neighbors_a = {g_a}; + std::set neighbors_b = {g_b}; + + if (m_direction == PinDirection::output || m_direction == PinDirection::input) + { + const bool search_successors = m_direction == PinDirection::output; + + auto subgraph_a_res = NetlistTraversalDecorator(*fc.nl).get_next_matching_gates_until_depth(g_a, search_successors, m_depth); + auto subgraph_b_res = NetlistTraversalDecorator(*fc.nl).get_next_matching_gates_until_depth(g_b, search_successors, 
m_depth); + + if (subgraph_a_res.is_error()) + { + log_error( + "machine_learning", "cannot calculate shared neighbors feature: failed to build subgraph of depth {} for gate {} with ID {}", m_depth, g_a->get_name(), g_a->get_id()); + return {}; + } + + if (subgraph_b_res.is_error()) + { + log_error( + "machine_learning", "cannot calculate shared neighbors feature: failed to build subgraph of depth {} for gate {} with ID {}", m_depth, g_b->get_name(), g_b->get_id()); + return {}; + } + + for (const auto g_n : subgraph_a_res.get()) + { + neighbors_a.insert(g_n); + } + + for (const auto g_n : subgraph_b_res.get()) + { + neighbors_b.insert(g_n); + } + + // TODO remove debug print + std::cout << "Subgraph A: " << std::endl; + for (const auto& g_a : neighbors_a) + { + std::cout << "\t" << g_a->get_id() << " / " << g_a->get_name() << std::endl; + } + + std::cout << "Subgraph B: " << std::endl; + for (const auto& g_b : neighbors_b) + { + std::cout << "\t" << g_b->get_id() << " / " << g_b->get_name() << std::endl; + } + } + + if (m_direction == PinDirection::inout) + { + // NOTE this is either trivial by just combining both directions or more complex by building a real undirected graph (predecessors of a successor would also be neighbors eventhough the direction switches) + log_error("machine_learning", "SharedNeighbors bidirectional feature not implemented yet"); + } + + std::set shared; + std::set_intersection(neighbors_a.begin(), neighbors_a.end(), neighbors_b.begin(), neighbors_b.end(), std::inserter(shared, shared.begin())); + + return {u32(shared.size())}; + }; + + std::string SharedNeighbors::get_name() const + { + return "SharedNeighbors"; + } + + std::vector build_feature_vec(const std::vector& features, const Gate* g_a, const Gate* g_b) + { + FeatureContext fc(g_a->get_netlist()); + return build_feature_vec(fc, features, g_a, g_b); + } + + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const Gate* g_a, const Gate* g_b) + { + std::vector feature_vec; + + for (const auto& gf : features) + { + // TODO remove + // std::cout << "Calculating feature " << gf->get_name() << " for g_a " << g_a->get_id() << " and g_b " << g_b->get_id() << std::endl; + + const auto new_features = gf->calculate_feature(fc, g_a, g_b); + feature_vec.insert(feature_vec.end(), new_features.begin(), new_features.end()); + } + + return feature_vec; + } + + std::vector build_feature_vec(const std::vector& features, const std::pair& gate_pair) + { + return build_feature_vec(features, gate_pair.first, gate_pair.second); + } + + std::vector build_feature_vec(FeatureContext& fc, const std::vector& features, const std::pair& gate_pair) + { + return build_feature_vec(fc, features, gate_pair.first, gate_pair.second); + } + + std::vector> build_feature_vecs(const std::vector& features, const std::vector>& gate_pairs) + { + if (gate_pairs.empty()) + { + return {}; + } + + FeatureContext fc(gate_pairs.front().first->get_netlist()); + return build_feature_vecs(fc, features, gate_pairs); + } + + std::vector> + build_feature_vecs(FeatureContext& fc, const std::vector& features, const std::vector>& gate_pairs) + { + std::vector> feature_vecs; + + for (const auto& gp : gate_pairs) + { + feature_vecs.push_back(build_feature_vec(fc, features, gp)); + } + + return feature_vecs; + } + } // namespace gate_pair_feature + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/src/graph_neural_network.cpp 
b/plugins/machine_learning/src/graph_neural_network.cpp new file mode 100644 index 00000000000..806bb6c4241 --- /dev/null +++ b/plugins/machine_learning/src/graph_neural_network.cpp @@ -0,0 +1,97 @@ +#include "machine_learning/graph_neural_network.h" + +#include "hal_core/defines.h" +#include "hal_core/netlist/gate.h" +#include "hal_core/netlist/netlist.h" +#include "hal_core/utilities/utils.h" +#include "machine_learning/features/gate_feature.h" + +#include + +namespace hal +{ + namespace machine_learning + { + namespace graph + { + NetlistGraph construct_netlist_graph(const Netlist* nl, const std::vector& gates, const GraphDirection& dir) + { + std::unordered_map gate_to_idx; + // init gate to index mapping + for (u32 g_idx = 0; g_idx < gates.size(); g_idx++) + { + const Gate* g = gates.at(g_idx); + gate_to_idx.insert({g, g_idx}); + } + + // edge list + std::vector sources; + std::vector destinations; + + for (const auto& g : gates) + { + const u32 g_idx = gate_to_idx.at(g); + if (dir == GraphDirection::directed_backward) + { + for (const auto& pre : g->get_unique_predecessors()) + { + sources.push_back(gate_to_idx.at(pre)); + destinations.push_back(g_idx); + } + } + + if (dir == GraphDirection::directed_forward) + { + for (const auto& suc : g->get_unique_successors()) + { + sources.push_back(g_idx); + destinations.push_back(gate_to_idx.at(suc)); + } + } + + if (dir == GraphDirection::bidirectional) + { + for (const auto& suc : g->get_unique_successors()) + { + sources.push_back(g_idx); + destinations.push_back(gate_to_idx.at(suc)); + + sources.push_back(gate_to_idx.at(suc)); + destinations.push_back(g_idx); + } + } + } + + return {{sources, destinations}, dir}; + } + + void annotate_netlist_graph(Netlist* nl, const std::vector& gates, const NetlistGraph& nlg, const std::vector>& node_features) + { + for (u32 g_idx = 0; g_idx < gates.size(); g_idx++) + { + gates.at(g_idx)->set_data("netlist_graph", "gate_index", "string", std::to_string(g_idx)); + + const auto feature_vec = node_features.at(g_idx); + const auto feature_str = utils::join(", ", feature_vec.begin(), feature_vec.end(), [](const u32 u) { return std::to_string(u); }); + + gates.at(g_idx)->set_data("netlist_graph", "features", "string", feature_str); + } + + std::unordered_map> edges; + for (u32 edge_idx = 0; edge_idx < nlg.edge_list.first.size(); edge_idx++) + { + const auto src = nlg.edge_list.first.at(edge_idx); + const auto dst = nlg.edge_list.second.at(edge_idx); + + edges[src].push_back(dst); + } + + for (const auto [src, dsts] : edges) + { + const auto vec_str = utils::join(", ", dsts.begin(), dsts.end(), [](const u32 u) { return std::to_string(u); }); + gates.at(src)->set_data("netlist_graph", "destinations", "string", vec_str); + } + } + } // namespace graph + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/src/labels/gate_pair_label.cpp b/plugins/machine_learning/src/labels/gate_pair_label.cpp new file mode 100644 index 00000000000..271fd10dca0 --- /dev/null +++ b/plugins/machine_learning/src/labels/gate_pair_label.cpp @@ -0,0 +1,394 @@ +#include "machine_learning/labels/gate_pair_label.h" + +#include "hal_core/netlist/gate.h" +#include "hal_core/netlist/netlist.h" +#include "hal_core/utilities/log.h" +#include "nlohmann_json/json.hpp" + +#include + +namespace hal +{ + namespace machine_learning + { + namespace gate_pair_label + { + namespace + { + MultiBitInformation calculate_multi_bit_information(const std::vector& gates) + { + std::map, 
std::set>> word_to_gates_unsorted; + + for (const auto g : gates) + { + if (!g->has_data("preprocessing_information", "multi_bit_indexed_identifiers")) + { + log_error("machine_learning", "unable to find indexed identifiers for gate with ID {}", g->get_id()); + continue; + } + + const std::string json_string = std::get<1>(g->get_data("preprocessing_information", "multi_bit_indexed_identifiers")); + + // TODO remove + // std::cout << "Trying to parse string: " << json_string << std::endl; + + // TODO catch exceptions and return result + const nlohmann::json j = nlohmann::json::parse(json_string); + const std::vector> index_information = j; + + // TODO remove + // if (!index_information.empty()) + // { + // std::cout << "For gate " << g->get_id() << " found " << std::get<0>(index_information.front()) << " - " << std::get<1>(index_information.front()) << std::endl; + // } + + for (const auto& [name, index, _origin, direction] : index_information) + { + word_to_gates_unsorted[{name, direction}].insert({index, g}); + } + } + + // 1. Sort out words with the same name by checking whether they contain duplicate indices + // 2. Dedupe all words by only keeping one word/name_direction for each multi_bit_signal/vector of gates. + std::map, std::pair> gates_to_word; + + for (const auto& [name_direction, word] : word_to_gates_unsorted) + { + std::set indices; + std::vector gates; + + // TODO remove + // std::cout << "Order Word: " << std::endl; + for (const auto& [index, gate] : word) + { + // TODO remove + // std::cout << index << std::endl; + + indices.insert(index); + gates.push_back(gate); + } + + // sanity check + if (indices.size() != word.size()) + { + // TODO return result + log_error("machine_learning", "Found index double in word {}-{}!", name_direction.first, name_direction.second); + + // TODO remove + std::cout << "Insane Word: " << std::endl; + for (const auto& [index, gate] : word) + { + std::cout << index << ": " << gate->get_id() << std::endl; + } + + continue; + } + + if (const auto it = gates_to_word.find(gates); it == gates_to_word.end()) + { + gates_to_word.insert({gates, name_direction}); + } + // NOTE could think about a priorization of shorter names or something similar + // else + } + + MultiBitInformation mbi; + + for (const auto& [gates, name_direction] : gates_to_word) + { + mbi.word_to_gates[name_direction] = gates; + for (const auto g : gates) + { + mbi.gate_to_words[g].push_back(name_direction); + } + } + + return mbi; + } + } // namespace + + const MultiBitInformation& LabelContext::get_multi_bit_information() + { + if (!mbi.has_value()) + { + mbi = calculate_multi_bit_information(gates); + } + + return mbi.value(); + } + + std::vector> SharedSignalGroup::calculate_gate_pairs(LabelContext& lc, const Netlist* nl, const std::vector& gates) const + { + const auto& mbi = lc.get_multi_bit_information(); + + std::vector> pairs; + + for (const auto& g : gates) + { + // positive labels + std::unordered_set pos_gates; + if (mbi.gate_to_words.find(g) == mbi.gate_to_words.end()) + { + // gate is only in a group with itself + pairs.push_back({g, g}); + pos_gates.insert(g); + } + else + { + // add all gates that are part of at least one other signal group as positive pair + for (const auto& name_direction : mbi.gate_to_words.at(g)) + { + const auto& gates = mbi.word_to_gates.at(name_direction); + for (const auto g_i : gates) + { + pairs.push_back({g, g_i}); + pos_gates.insert(g); + } + } + } + + // negative labels (equal amount to the positive labels) + const u64 pos_count = 
pos_gates.size(); + const u64 neg_count = std::min(gates.size() - pos_count, pos_count); + + std::set chosen_gates; + for (u32 i = 0; i < neg_count; i++) + { + const u32 start = std::rand() % lc.nl->get_gates().size(); + for (u32 idx = start; idx < start + lc.nl->get_gates().size(); idx++) + { + const auto g_i = lc.nl->get_gates().at(idx % lc.nl->get_gates().size()); + if (pos_gates.find(g_i) == pos_gates.end() && chosen_gates.find(g_i) == chosen_gates.end()) + { + pairs.push_back({g, g_i}); + chosen_gates.insert(g_i); + break; + } + } + } + } + + return pairs; + }; + + std::vector SharedSignalGroup::calculate_label(LabelContext& lc, const Gate* g_a, const Gate* g_b) const + { + const auto& mbi = lc.get_multi_bit_information(); + const auto& words_a = mbi.gate_to_words.at(g_a); + const auto& words_b = mbi.gate_to_words.at(g_b); + + for (const auto& wa : words_a) + { + for (const auto& wb : words_b) + { + if (wa == wb) + { + return {1}; + } + } + } + + return {0}; + }; + + std::vector> SharedSignalGroup::calculate_labels(LabelContext& lc, const std::vector>& gate_pairs) const + { + std::vector> labels; + + for (const auto& p : gate_pairs) + { + labels.push_back(calculate_label(lc, p.first, p.second)); + } + + return labels; + } + + std::pair>, std::vector>> SharedSignalGroup::calculate_labels(LabelContext& lc) const + { + const auto& mbi = lc.get_multi_bit_information(); + + std::vector> pairs; + std::vector> labels; + + for (const auto& g : lc.nl->get_gates([](const Gate* g_i) { return g_i->get_type()->has_property(GateTypeProperty::ff); })) + { + // positive labels + std::unordered_set pos_gates; + if (mbi.gate_to_words.find(g) == mbi.gate_to_words.end()) + { + // gate is only in a group with itself + pairs.push_back({g, g}); + labels.push_back({{1}}); + pos_gates.insert(g); + } + else + { + // add all gates that are part of at least one other signal group as positive pair + for (const auto& name_direction : mbi.gate_to_words.at(g)) + { + const auto& gates = mbi.word_to_gates.at(name_direction); + for (const auto g_i : gates) + { + pairs.push_back({g, g_i}); + labels.push_back({{1}}); + pos_gates.insert(g); + } + } + } + + // negative labels (equal amount to the positive labels) + const u64 pos_count = pos_gates.size(); + const u64 neg_count = std::min(lc.nl->get_gates().size() - pos_count, pos_count); + + // TODO remove + // std::cout << "Found " << all_connected.size() << " connections for gate " << g->get_id() << ". Trying to find " << neg_count << " opposites!" 
<< std::endl; + + std::set chosen_gates; + for (u32 i = 0; i < neg_count; i++) + { + const u32 start = std::rand() % lc.nl->get_gates().size(); + for (u32 idx = start; idx < start + lc.nl->get_gates().size(); idx++) + { + const auto g_i = lc.nl->get_gates().at(idx % lc.nl->get_gates().size()); + if (pos_gates.find(g_i) == pos_gates.end() && chosen_gates.find(g_i) == chosen_gates.end()) + { + pairs.push_back({g, g_i}); + labels.push_back({{0}}); + + chosen_gates.insert(g_i); + break; + } + } + } + } + + return {pairs, labels}; + }; + + namespace + { + std::unordered_set get_all_connected_gates(const Gate* g) + { + std::unordered_set connected; + for (const auto pre : g->get_unique_predecessors()) + { + connected.insert(pre); + } + + for (const auto suc : g->get_unique_successors()) + { + connected.insert(suc); + } + + return connected; + } + } // namespace + + std::vector> SharedConnection::calculate_gate_pairs(LabelContext& lc, const Netlist* nl, const std::vector& gates) const + { + std::vector> pairs; + + for (const auto& g : gates) + { + // positive labels + const auto all_connected = get_all_connected_gates(g); + for (const auto g_c : all_connected) + { + pairs.push_back({g, g_c}); + } + + // negative labels (equal amount to the positive labels) + const u32 neg_count = std::min(nl->get_gates().size() - all_connected.size(), all_connected.size()); + + // TODO remove + // std::cout << "Found " << all_connected.size() << " connections for gate " << g->get_id() << ". Trying to find " << neg_count << " opposites!" << std::endl; + + std::set chosen_gates; + for (u32 i = 0; i < neg_count; i++) + { + const u32 start = std::rand() % gates.size(); + for (u32 idx = start; idx < start + gates.size(); idx++) + { + const auto g_i = gates.at(idx % gates.size()); + if (all_connected.find(g_i) == all_connected.end() && chosen_gates.find(g_i) == chosen_gates.end()) + { + pairs.push_back({g, g_i}); + chosen_gates.insert(g_i); + break; + } + } + } + } + + return pairs; + }; + + std::vector SharedConnection::calculate_label(LabelContext& lc, const Gate* g_a, const Gate* g_b) const + { + const auto all_connected = get_all_connected_gates(g_a); + + if (all_connected.find(g_b) == all_connected.end()) + { + return {0}; + } + + return {1}; + }; + + std::vector> SharedConnection::calculate_labels(LabelContext& lc, const std::vector>& gate_pairs) const + { + std::vector> labels; + + for (const auto& p : gate_pairs) + { + labels.push_back(calculate_label(lc, p.first, p.second)); + } + + return labels; + } + + std::pair>, std::vector>> SharedConnection::calculate_labels(LabelContext& lc) const + { + std::vector> pairs; + std::vector> labels; + + for (const auto& g : lc.nl->get_gates()) + { + // positive labels + u64 pos_count = 0; + const auto all_connected = get_all_connected_gates(g); + for (const auto g_c : all_connected) + { + pairs.push_back({g, g_c}); + labels.push_back({{1}}); + + pos_count += 1; + } + + // negative labels (equal amount to the positive labels) + const u64 neg_count = std::min(lc.nl->get_gates().size() - pos_count, pos_count); + + // TODO remove + // std::cout << "Found " << all_connected.size() << " connections for gate " << g->get_id() << ". Trying to find " << neg_count << " opposites!" 
<< std::endl; + + for (u32 i = 0; i < neg_count; i++) + { + const u32 start = std::rand() % lc.nl->get_gates().size(); + for (u32 idx = start; idx < start + lc.nl->get_gates().size(); idx++) + { + const auto g_i = lc.nl->get_gates().at(idx % lc.nl->get_gates().size()); + if (all_connected.find(g_i) == all_connected.end()) + { + pairs.push_back({g, g_i}); + labels.push_back({{0}}); + break; + } + } + } + } + + return {pairs, labels}; + }; + } // namespace gate_pair_label + } // namespace machine_learning +} // namespace hal \ No newline at end of file diff --git a/plugins/machine_learning/src/plugin_machine_learning.cpp b/plugins/machine_learning/src/plugin_machine_learning.cpp new file mode 100644 index 00000000000..1fdc5dc178e --- /dev/null +++ b/plugins/machine_learning/src/plugin_machine_learning.cpp @@ -0,0 +1,29 @@ +#include "machine_learning/plugin_machine_learning.h" + +namespace hal +{ + extern std::unique_ptr create_plugin_instance() + { + return std::make_unique(); + } + + std::string MachineLearningPlugin::get_name() const + { + return std::string("machine_learning"); + } + + std::string MachineLearningPlugin::get_version() const + { + return std::string("0.1"); + } + + std::string MachineLearningPlugin::get_description() const + { + return "TODO"; + } + + std::set MachineLearningPlugin::get_dependencies() const + { + return {}; + } +} // namespace hal
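
For anyone trying out this patch, the following is a minimal sketch of how the new bindings can be driven from a HAL Python shell. It assumes the plugin was built with PL_MACHINE_LEARNING enabled and that a netlist has already been loaded into `netlist` via hal_py; it only calls the test_* entry points that the scripts above already use, and all variable names are illustrative.

from hal_plugins import machine_learning

# netlist graph and per-gate labels, as consumed by minimal_test.py
graph = machine_learning.MachineLearning.Graph.test_construct_netlist_graph(netlist)
node_labels = machine_learning.MachineLearning.Graph.test_construct_node_labels(netlist)

# gate-pair labels and features, as consumed by minimal_test_pairs.py
pairs, pair_labels = machine_learning.MachineLearning.GatePairLabel.test_build_labels(netlist)
pair_features = machine_learning.MachineLearning.GatePairFeature.test_build_feature_vec(netlist, pairs)

print("gates labelled: {}, gate pairs: {}".format(len(node_labels), len(pairs)))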