Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[GPU] Use optional and nested namespace features from C++17 #28628

Open
wants to merge 4 commits into from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/convolution.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@

#include "openvino/op/util/convolution_base.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

// Common node for v1::Convolution and v1::GroupConvolution with few extensions
// - Relaxed type requirements
Expand Down Expand Up @@ -76,6 +74,4 @@ std::vector<ov::PartialShape> shape_infer(const Convolution* op,
CoordinateDiff& pads_begin,
CoordinateDiff& pads_end);

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,7 @@
#include "openvino/core/node.hpp"
#include "openvino/op/op.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class FullyConnected : public ov::op::Op {
public:
Expand All @@ -34,6 +32,4 @@ class FullyConnected : public ov::op::Op {
ov::element::Type m_output_type;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@

#include "fully_connected.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class FullyConnectedCompressed : public FullyConnected {
public:
Expand Down Expand Up @@ -42,6 +40,4 @@ class FullyConnectedCompressed : public FullyConnected {
std::shared_ptr<Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/gemm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@
#include "openvino/op/matmul.hpp"
#include "openvino/op/op.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class Gemm : public ov::op::v0::MatMul {
public:
Expand Down Expand Up @@ -56,6 +54,4 @@ std::vector<ov::PartialShape> shape_infer(const Gemm* op,
const std::vector<int64_t>& order_b,
const std::vector<int64_t>& order_c);

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/op.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class IndirectGemm : public ov::intel_gpu::op::Gemm {
public:
Expand Down Expand Up @@ -49,6 +47,4 @@ class IndirectGemm : public ov::intel_gpu::op::Gemm {
int64_t m_indirect_axis = 0;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/op.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class IndirectSDPA : public ov::intel_gpu::op::SDPA {
public:
Expand Down Expand Up @@ -55,6 +53,4 @@ class IndirectSDPA : public ov::intel_gpu::op::SDPA {
int64_t m_indirect_axis = -1;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/kv_cache.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,7 @@
#include "openvino/op/util/variable_extension.hpp"
#include "ov_ops/dynamic_quantize.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

/// \brief Operator that implements Key-Values cache subgraph for large language models.
/// This operation updates data of the corresponding Variable
Expand Down Expand Up @@ -71,6 +69,4 @@ class KVCache : public ov::op::Op, public ov::op::util::VariableExtension {

std::vector<ov::PartialShape> shape_infer(const KVCache* op, const std::vector<ov::PartialShape>& input_shapes);

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,7 @@
#include "intel_gpu/op/kv_cache.hpp"
#include "ov_ops/dynamic_quantize.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

/// \brief Operator that implements Key-Values cache subgraph for large language models.
/// This operation updates data of the corresponding Variable
Expand Down Expand Up @@ -51,6 +49,4 @@ class KVCacheCompressed : public ov::intel_gpu::op::KVCache {
std::vector<ov::PartialShape> shape_infer(const KVCacheCompressed* op,
const std::vector<ov::PartialShape>& input_shapes);

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/placeholder.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@

#include "openvino/op/op.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class Placeholder : public ov::op::Op {
public:
Expand All @@ -21,6 +19,4 @@ class Placeholder : public ov::op::Op {
std::shared_ptr<Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/read_value.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,7 @@
#include "openvino/op/util/variable.hpp"
#include "openvino/op/util/variable_extension.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

/// \brief Similar to common v6::ReadValue, but it's not derived from ReadValueBase class to avoid ReadValue-Assign pairing check
/// This is needed to have ReadValue-KVCache pair instead of ReadValue-Assign
Expand Down Expand Up @@ -42,6 +40,4 @@ class ReadValue : public ov::op::Op, public ov::op::util::VariableExtension {
}
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/read_values.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,7 @@

#include "intel_gpu/op/read_value.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

/// \brief This operation handles the OpenVINO GPU Plugin's custom variable
// representation (which can store multiple states in a single variable) at the graph level.
Expand Down Expand Up @@ -37,6 +35,4 @@ class ReadValues : public ReadValue {
std::vector<ov::op::util::VariableInfo> m_internal_states_infos;
};

} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
8 changes: 2 additions & 6 deletions src/plugins/intel_gpu/include/intel_gpu/op/sdpa.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,7 @@
#include "openvino/op/scaled_dot_product_attention.hpp"
#include "ov_ops/dynamic_quantize.hpp"

namespace ov {
namespace intel_gpu {
namespace op {
namespace ov::intel_gpu::op {

class SDPA : public ov::op::v13::ScaledDotProductAttention {
public:
Expand Down Expand Up @@ -83,6 +81,4 @@ std::vector<ov::PartialShape> shape_infer(const SDPA* op,
const std::vector<int64_t>& order_out);


} // namespace op
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu::op
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,8 @@

#include "openvino/runtime/iasync_infer_request.hpp"
#include "intel_gpu/plugin/sync_infer_request.hpp"
#include <string>
#include <map>

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

class AsyncInferRequest : public ov::IAsyncInferRequest {
public:
Expand All @@ -29,5 +26,4 @@ class AsyncInferRequest : public ov::IAsyncInferRequest {
std::shared_ptr<ov::threading::ITaskExecutor> m_wait_executor;
};

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
#include <tuple>
#include "intel_gpu/runtime/layout.hpp"
#include "intel_gpu/runtime/memory.hpp"
#include "intel_gpu/runtime/optionals.hpp"

#include "intel_gpu/runtime/shape_predictor.hpp"
#include "openvino/core/layout.hpp"
#include "openvino/core/node.hpp"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,7 @@
#include "intel_gpu/runtime/execution_config.hpp"
#include "openvino/runtime/icompiled_model.hpp"

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

class CompiledModel : public ov::ICompiledModel {
public:
Expand Down Expand Up @@ -74,5 +73,4 @@ class CompiledModel : public ov::ICompiledModel {
bool m_loaded_from_cache;
};

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,12 @@
#pragma once
#include <memory>
#include <string>
#include <sstream>
#include <vector>
#include <map>
#include "pugixml.hpp"
#include "intel_gpu/runtime/tensor.hpp"
#include "intel_gpu/runtime/format.hpp"

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

using CustomLayerPtr = std::shared_ptr<class CustomLayer>;
using CustomLayerMap = std::map<std::string, CustomLayerPtr>;
Expand Down Expand Up @@ -82,5 +80,4 @@ class CustomLayer{
std::string m_ErrorMessage;
};

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
9 changes: 2 additions & 7 deletions src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,22 +9,18 @@
#endif

#include "intel_gpu/graph/network.hpp"
#include "intel_gpu/graph/topology.hpp"
#include "intel_gpu/plugin/custom_layer.hpp"
#include "intel_gpu/plugin/remote_context.hpp"
#include "intel_gpu/plugin/program_builder.hpp"

#include <vector>
#include <map>
#include <set>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <condition_variable>

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {
struct HostTimeProfilingEntry {
int64_t inputs_processing = 0;
int64_t enqueue = 0;
Expand Down Expand Up @@ -106,5 +102,4 @@ class Graph final {
std::shared_ptr<ov::Model> get_runtime_model(std::vector<cldnn::primitive_info>& pi, bool filter_const_primitives = true);
};

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@
#include "intel_gpu/plugin/variable_state.hpp"
#include "openvino/core/partial_shape.hpp"

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

class MultiTensorState : public VariableStateBase {
public:
Expand Down Expand Up @@ -72,5 +71,4 @@ class VariableStateIndirectKVCacheCompressed : public VariableStateIndirectKVCac
private:
bool m_has_zp_state = false;
};
} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
6 changes: 2 additions & 4 deletions src/plugins/intel_gpu/include/intel_gpu/plugin/plugin.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,7 @@
#include <string>
#include <memory>

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

class Plugin : public ov::IPlugin {
private:
Expand Down Expand Up @@ -67,5 +66,4 @@ class Plugin : public ov::IPlugin {
ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;
};

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,7 @@ void __register ## _ ## op_name ## _ ## op_version() {
}); \
}

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

template<class T>
struct is_smart_pointer : std::false_type {};
Expand Down Expand Up @@ -195,5 +194,4 @@ inline bool ends_with(const std::string& value, const std::string& suffix) {
return std::equal(suffix.rbegin(), suffix.rend(), value.rbegin());
}

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,7 @@
#include <memory>
#include <atomic>

namespace ov {
namespace intel_gpu {
namespace ov::intel_gpu {

class RemoteContextImpl : public ov::IRemoteContext {
public:
Expand Down Expand Up @@ -74,5 +73,4 @@ inline RemoteContextImpl::Ptr get_context_impl(ov::SoPtr<ov::IRemoteContext> ptr
return casted;
}

} // namespace intel_gpu
} // namespace ov
} // namespace ov::intel_gpu
Loading
Loading