From 91cfdae420940ebf992d3893faa9755c008731ad Mon Sep 17 00:00:00 2001 From: "River.Li" Date: Fri, 6 Oct 2023 13:48:39 +0800 Subject: [PATCH] Update for reviewers' comments --- src/plugins/intel_cpu/src/compiled_model.cpp | 35 +++----- src/plugins/intel_cpu/src/compiled_model.h | 1 - src/plugins/intel_cpu/src/cpu_memory.cpp | 59 +++++++------ .../intel_cpu/src/cpu_streams_calculation.cpp | 2 +- src/plugins/intel_cpu/src/graph.cpp | 17 ++-- src/plugins/intel_cpu/src/infer_request.cpp | 87 +++++++++---------- src/plugins/intel_cpu/src/infer_request.h | 7 +- src/plugins/intel_cpu/src/nodes/convert.cpp | 3 +- src/plugins/intel_cpu/src/plugin.cpp | 10 ++- src/plugins/intel_cpu/src/plugin.h | 2 +- .../intel_cpu/src/utils/ngraph_utils.hpp | 3 - .../skip_tests_config.cpp | 2 +- .../functional/test_utils/cpu_test_utils.cpp | 6 +- 13 files changed, 106 insertions(+), 128 deletions(-) diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index c88911980e18d1..b6079b97e42d00 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -12,7 +12,6 @@ #include "nodes/memory.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/intel_cpu/properties.hpp" -#include "precision_utils.h" #include "serialize.h" #include "threading/ie_executor_manager.hpp" #include "transformations/transformation_pipeline.h" @@ -21,10 +20,6 @@ # include #endif -#include "ie_ngraph_utils.hpp" -#include "ie_system_conf.h" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "openvino/opsets/opset1.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/util/common_util.hpp" #include "threading/ie_cpu_streams_executor.hpp" @@ -209,23 +204,6 @@ std::shared_ptr CompiledModel::get_runtime_model() const { return get_graph()._graph.dump(); } -ov::Any CompiledModel::get_property(const std::string& name) const { - if (m_graphs.empty()) - OPENVINO_THROW("No graph was found"); - - if (name == ov::loaded_from_cache) { - return m_loaded_from_cache; - } - - Config engConfig = get_graph()._graph.getConfig(); - auto option = engConfig._config.find(name); - if (option != engConfig._config.end()) { - return option->second; - } - - return get_metric(name); -} - ov::Any CompiledModel::get_metric_legacy(const std::string& name, const GraphGuard& graph) const { OPENVINO_SUPPRESS_DEPRECATED_START if (name == METRIC_KEY(NETWORK_NAME)) { @@ -255,9 +233,20 @@ ov::Any CompiledModel::get_metric_legacy(const std::string& name, const GraphGua OPENVINO_SUPPRESS_DEPRECATED_END } -ov::Any CompiledModel::get_metric(const std::string& name) const { +ov::Any CompiledModel::get_property(const std::string& name) const { if (m_graphs.empty()) OPENVINO_THROW("No graph was found"); + + if (name == ov::loaded_from_cache) { + return m_loaded_from_cache; + } + + Config engConfig = get_graph()._graph.getConfig(); + auto option = engConfig._config.find(name); + if (option != engConfig._config.end()) { + return option->second; + } + // @todo Can't we just use local copy (_cfg) instead? 
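// --- Reviewer note (illustrative only, not part of this change) -----------------
// With get_metric() folded into get_property() above, the special key
// ov::loaded_from_cache, the config-backed keys and the read-only metrics are all
// served through a single entry point. A minimal user-side sketch of the effect,
// assuming a model file "model.xml" (hypothetical path), kept compile-excluded:
#if 0
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");
    auto cached  = compiled.get_property(ov::loaded_from_cache);  // special key handled up front
    auto streams = compiled.get_property(ov::num_streams);
    auto name    = compiled.get_property(ov::model_name);         // formerly reached via get_metric()
    (void)cached; (void)streams; (void)name;
    return 0;
}
#endif
// --------------------------------------------------------------------------------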
auto graphLock = get_graph(); const auto& graph = graphLock._graph; diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index 2ee8b8c7c36f68..9da2db08292d3b 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -81,7 +81,6 @@ class CompiledModel : public ov::ICompiledModel { */ GraphGuard::Lock get_graph() const; - ov::Any get_metric(const std::string& name) const; ov::Any get_metric_legacy(const std::string& name, const GraphGuard& graph) const; }; diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp index 1048ad500d93c2..2b7fe3f4001987 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.cpp +++ b/src/plugins/intel_cpu/src/cpu_memory.cpp @@ -23,6 +23,16 @@ using namespace dnnl; namespace ov { namespace intel_cpu { +template <> +DnnlMemoryDescPtr IMemory::getDescWithType() const { + return MemoryDescUtils::convertToDnnlMemoryDesc(getDescPtr()); +} + +template <> +BlockedMemoryDescPtr IMemory::getDescWithType() const { + return MemoryDescUtils::convertToBlockedMemoryDesc(getDescPtr()); +} + namespace { inline void setSubnormalsToZero(float *data, size_t size) { uint32_t *u32data = reinterpret_cast(data); @@ -35,29 +45,28 @@ namespace { void transferData(const IMemory& src, const IMemory& dst, bool ftz) { node::Reorder::reorderData(src, dst); - try { - auto src_data_type = DnnlExtensionUtils::IEPrecisionToDataType(src.getDesc().getPrecision()); - auto dst_data_type = DnnlExtensionUtils::IEPrecisionToDataType(dst.getDesc().getPrecision()); - if (src_data_type != memory::data_type::f32 || dst_data_type == memory::data_type::bf16) { - return; - } - } catch (ov::Exception&) { + + if (!ftz) { return; } - - auto localPrim = dst.getPrimitive(); - auto desc = localPrim.get_desc(); - dnnl::impl::memory_desc_wrapper wrapper(desc.get()); - - if (ftz - && !wrapper.is_wino_desc() - // WA: to avoid zero filling auxiliary information - && !wrapper.is_rnn_packed_desc()) { - // Internal blobs don't have strides yet. 
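// --- Reviewer note (illustrative only, not part of this change) -----------------
// The flush-to-zero path reworked in this hunk ultimately calls
// setSubnormalsToZero(), whose body lies outside the diff context. A minimal
// standalone sketch of the idea (names are hypothetical, the real helper may
// differ): a single-precision value is zero or subnormal exactly when its
// exponent bits are all zero, so the check is a mask test on the reinterpreted
// 32-bit storage. Kept compile-excluded:
#if 0
#include <cstddef>
#include <cstdint>
#include <cstring>

static void flush_subnormals_to_zero(float* data, std::size_t count) {
    for (std::size_t i = 0; i < count; ++i) {
        std::uint32_t bits;
        std::memcpy(&bits, &data[i], sizeof(bits));  // well-defined type punning
        if ((bits & 0x7F800000u) == 0u) {            // exponent field == 0 -> zero or subnormal
            data[i] = 0.0f;                          // flush (rewriting +/-0 is harmless)
        }
    }
}
#endif
// --------------------------------------------------------------------------------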
- auto *memData = static_cast(dst.getData()); - memData += wrapper.offset0(); - setSubnormalsToZero(memData, dst.getSize() / sizeof(float)); + if (src.getDesc().getPrecision() != Precision::FP32 || dst.getDesc().getPrecision() == Precision::BF16) { + return; + } + size_t offset = 0; + if (dst.getDesc().getType() & MemoryDescType::Dnnl) { + // here we can safely cast to DnnlMemoryDesc + auto dnnl_desc = dst.getDescWithType(); + auto desc = dnnl_desc->getDnnlDesc(); + dnnl::impl::memory_desc_wrapper wrapper(desc.get()); + offset = wrapper.offset0(); + if (wrapper.is_wino_desc() || wrapper.is_rnn_packed_desc()) { + return; + } } + // actual FTZ + auto* memData = static_cast(dst.getData()); + memData += offset; + setSubnormalsToZero(memData, dst.getSize() / sizeof(float)); } } // namespace @@ -129,16 +138,6 @@ void Memory::redefineDesc(MemoryDescPtr desc) { this->create(desc, nullptr, false); } -template<> -DnnlMemoryDescPtr IMemory::getDescWithType() const { - return MemoryDescUtils::convertToDnnlMemoryDesc(getDescPtr()); -} - -template<> -BlockedMemoryDescPtr IMemory::getDescWithType() const { - return MemoryDescUtils::convertToBlockedMemoryDesc(getDescPtr()); -} - void Memory::update() { if (dnnlMemHandle.isInit()) { auto prim = dnnlMemHandle.getPrim(); diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 3174190956b066..b000c35c646648 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -469,7 +469,7 @@ int get_model_prefer_threads(const int num_streams, model_prefer = proc_type_table[0][ALL_PROC]; } #else - bool fp_intesive = !ov::op::util::has_op_with_type(model); + bool fp_intesive = !ov::op::util::has_op_with_type(model); const int int8_threshold = 4; // ~relative efficiency of the VNNI-intensive code for Big vs Little cores; const int fp32_threshold = 2; // ~relative efficiency of the AVX2 fp32 code for Big vs Little cores; // by default the latency case uses (faster) Big cores only, depending on the compute ratio diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 3ef68d78be08b4..5b2b41a84a6ac4 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -868,10 +868,10 @@ bool Graph::ProcessDynNodes() { return result; } -void Graph::PushInputData(const std::string& name, const ov::SoPtr &input) { +void Graph::PushInputData(const std::string& name, const ov::SoPtr& input) { if (!IsReady()) OPENVINO_THROW("Wrong state. 
Topology not ready."); - auto _input = inputNodesMap.find(name); - if (_input != inputNodesMap.end()) { + auto input_itr = inputNodesMap.find(name); + if (input_itr != inputNodesMap.end()) { auto create_mem_desc = [&](const ov::SoPtr& tensor) -> CpuBlockedMemoryDesc { auto element_type = tensor->get_element_type(); auto shape = tensor->get_shape(); @@ -899,13 +899,14 @@ void Graph::PushInputData(const std::string& name, const ov::SoPtr &inp return byte_stride / element_type.size(); }); } - InferenceEngine::TensorDesc tensorDesc(ie::details::convertPrecision(tensor->get_element_type()), - shape, - ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}); + InferenceEngine::TensorDesc tensorDesc( + InferenceEngine::details::convertPrecision(tensor->get_element_type()), + shape, + InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}); return MemoryDescUtils::convertToCpuBlockedMemoryDesc(tensorDesc); }; - auto node = _input->second; + auto node = input_itr->second; auto childEdge = node->getChildEdgeAt(0); const auto& outDims = node->getOutputShapeAtPort(0); @@ -915,7 +916,7 @@ void Graph::PushInputData(const std::string& name, const ov::SoPtr &inp // Convert data if precision mismatch auto& inter_mem_desc = childEdge->getMemory().getDesc(); auto inter_precision = inter_mem_desc.getPrecision(); - auto ext_precision = ie::details::convertPrecision(input->get_element_type()); + auto ext_precision = InferenceEngine::details::convertPrecision(input->get_element_type()); if (ext_precision != inter_precision) { if ((inter_data_ptr == nullptr) || (ext_data_ptr == nullptr)) { OPENVINO_THROW("Get tensor has no allocated memory"); diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp index e7d0fd6f0a8af0..dbe6388eced6ea 100644 --- a/src/plugins/intel_cpu/src/infer_request.cpp +++ b/src/plugins/intel_cpu/src/infer_request.cpp @@ -401,9 +401,9 @@ static InferenceEngine::TensorDesc create_tensor_desc(const ov::SoPtr& }); } OPENVINO_SUPPRESS_DEPRECATED_START - return ie::TensorDesc{ie::details::convertPrecision(element_type), + return InferenceEngine::TensorDesc{InferenceEngine::details::convertPrecision(element_type), shape, - ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; + InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; OPENVINO_SUPPRESS_DEPRECATED_END } @@ -421,9 +421,9 @@ const ov::Output& SyncInferRequest::get_internal_port(const ov:: auto name = get_port_name(port, m_is_legacy_api); bool is_input = ov::op::util::is_parameter(port.get_node()); if (is_input) { - return m_input_ports_map[name]; + return m_input_ports_map.at(name); } else { - return m_output_ports_map[name]; + return m_output_ports_map.at(name); } } @@ -448,17 +448,17 @@ void SyncInferRequest::set_tensor(const ov::Output& in_port, con const auto netInPrc = port.get_element_type(); if (netInPrc != tensor->get_element_type()) { IE_THROW(ParameterMismatch) << "Failed to set input tensor with precision: " << tensor->get_element_type() - << ", if model input tensor precision is: " << netInPrc; + << ", since the model input tensor precision is: " << netInPrc; } const auto& shape = port.get_partial_shape(); const bool isDynamic = shape.is_dynamic(); if (!shape.compatible(ov::PartialShape(tensor->get_shape()))) { - OPENVINO_THROW("The tensor size is not equal to model, can't set input tensor with name: ", + OPENVINO_THROW("Can't set input tensor with name: ", name, - ", because model input (shape=", + ", because the 
model input (shape=",
                        shape,
-                       ") and tensor (shape=",
+                       ") and the tensor (shape=",
                        vec2str(tensor->get_shape()),
                        ") are incompatible");
     }
@@ -466,9 +466,9 @@ void SyncInferRequest::set_tensor(const ov::Output& in_port, con
     if (!isDynamic && ov::shape_size(shape.to_shape()) != tensor->get_size()) {
         OPENVINO_THROW("Can't set input tensor with name: ",
                        name,
-                       ", because model input size = ",
+                       ", because the model input size = ",
                        ov::shape_size(shape.to_shape()),
-                       " and tensor size = ",
+                       " and the tensor size = ",
                        tensor->get_size(),
                        " are different.");
     }
@@ -501,21 +501,21 @@ void SyncInferRequest::set_tensor(const ov::Output& in_port, con
         const bool isDynamic = shape.is_dynamic();
         if (!shape.compatible(ov::PartialShape(tensor->get_shape()))) {
-            OPENVINO_THROW("The tensor size is not equal to model, can't set output tensor with name: ",
+            OPENVINO_THROW("Can't set the output tensor with name: ",
                            name,
-                           ", because model output (shape=",
+                           ", because the model output tensor (shape=",
                            shape,
-                           ") and blob (shape=",
+                           ") and the current tensor (shape=",
                            vec2str(tensor->get_shape()),
                            ") are incompatible");
         }
         if (!isDynamic && ov::shape_size(shape.to_shape()) != tensor->get_size()) {
-            OPENVINO_THROW("Can't set output tensor with name: ",
+            OPENVINO_THROW("Can't set the output tensor with name: ",
                            name,
-                           ", because model output size = ",
+                           ", because the model output size = ",
                            ov::shape_size(shape.to_shape()),
-                           " and blob size = ",
+                           " and the current tensor size = ",
                            tensor->get_size(),
                            " are different.");
         }
@@ -556,33 +556,33 @@ void SyncInferRequest::init_tensor(const std::string& name) {
     auto input = inMap.find(name);
     if (input != inMap.end()) {
         auto input_port = m_input_ports_map.find(name);
-        if (input_port != m_input_ports_map.end()) {
-            auto& port = input_port->second;
-            tensor = ov::ISyncInferRequest::get_tensor(port);
-
-            if (!tensor) {
-                const auto& shape = port.get_partial_shape();
-                const bool isDynamic = shape.is_dynamic();
-                ov::Shape tensor_shape;
-                if (isDynamic) {
-                    tensor_shape = ov::Shape(shape.rank().get_length(), 0);
-                } else {
-                    tensor_shape = shape.to_shape();
-                }
+        OPENVINO_ASSERT(input_port != m_input_ports_map.end(),
+                        "Tensor with name: ",
+                        name,
+                        " exists in CPU plugin graph, but is absent in network inputs");
+        auto& port = input_port->second;
+        tensor = ov::ISyncInferRequest::get_tensor(port);
+
+        if (!tensor) {
+            const auto& shape = port.get_partial_shape();
+            const bool isDynamic = shape.is_dynamic();
+            ov::Shape tensor_shape;
+            if (isDynamic) {
+                tensor_shape = ov::Shape(shape.rank().get_length(), 0);
+            } else {
+                tensor_shape = shape.to_shape();
+            }

-                tensor = ov::make_tensor(port.get_element_type(), tensor_shape);
-                ov::ISyncInferRequest::set_tensor(port, tensor);
+            tensor = ov::make_tensor(port.get_element_type(), tensor_shape);
+            ov::ISyncInferRequest::set_tensor(port, tensor);

-                auto desc = create_tensor_desc(tensor);
-                if (!isDynamic &&
-                    desc == MemoryDescUtils::convertToTensorDesc(
-                                graph->getInputNodeByName(name)->getChildEdgesAtPort(0)[0]->getMemory().getDesc()) &&
-                    graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end()) {
-                    external_ptr[name] = tensor;
-                }
+            auto desc = create_tensor_desc(tensor);
+            if (!isDynamic &&
+                desc == MemoryDescUtils::convertToTensorDesc(
+                            graph->getInputNodeByName(name)->getChildEdgesAtPort(0)[0]->getMemory().getDesc()) &&
+                graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end()) {
+                external_ptr[name] = tensor;
             }
-        } else {
-            OPENVINO_THROW("Tensor with name: ", name, " 
exists in CPU plugin graph, but absents in network inputs"); } } @@ -629,11 +629,6 @@ void SyncInferRequest::init_tensor(const std::string& name) { outputControlBlocks.emplace(std::make_pair(name, std::move(control_block))); } else { tensor_shape = shape.to_shape(); - - InferenceEngine::TensorDesc desc( - InferenceEngine::details::convertPrecision(port.get_element_type()), - tensor_shape, - InferenceEngine::TensorDesc::getLayoutByRank(tensor_shape.size())); tensor = ov::make_tensor(port.get_element_type(), tensor_shape); } ov::ISyncInferRequest::set_tensor(port, tensor); @@ -659,7 +654,7 @@ void SyncInferRequest::init_tensor(const std::string& name) { if (netOutPrc != tensor->get_element_type()) { IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name - << " but expect blobs with different precision: " << tensor->get_element_type() + << " but expect tensor with different precision: " << tensor->get_element_type() << " for input and " << netOutPrc << " for output."; } } diff --git a/src/plugins/intel_cpu/src/infer_request.h b/src/plugins/intel_cpu/src/infer_request.h index 3ee71425870c53..51bbdabfbe6cb7 100644 --- a/src/plugins/intel_cpu/src/infer_request.h +++ b/src/plugins/intel_cpu/src/infer_request.h @@ -46,7 +46,7 @@ class SyncInferRequest : public ov::ISyncInferRequest { void throw_if_canceled() const; -protected: +private: class OutputControlBlock { public: using MemMngrPtr = std::shared_ptr; @@ -92,7 +92,6 @@ class SyncInferRequest : public ov::ISyncInferRequest { }; std::unordered_map outputControlBlocks; -private: void create_infer_request(); void pushInput(const std::string& inputName, ov::SoPtr& inputBlob, InferenceEngine::Precision dataType); @@ -115,8 +114,8 @@ class SyncInferRequest : public ov::ISyncInferRequest { std::vector> m_memory_states; AsyncInferRequest* m_asyncRequest = nullptr; - mutable std::unordered_map> m_input_ports_map; - mutable std::unordered_map> m_output_ports_map; + std::unordered_map> m_input_ports_map; + std::unordered_map> m_output_ports_map; std::unordered_map> m_outputs; void change_default_ptr(); diff --git a/src/plugins/intel_cpu/src/nodes/convert.cpp b/src/plugins/intel_cpu/src/nodes/convert.cpp index ba91f38f239ed8..daeda2ccb5edd8 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/convert.cpp @@ -128,8 +128,7 @@ void Convert::initSupportedPrimitiveDescriptors() { auto creators = BlockedDescCreator::getCommonCreators(); // As long as convert is placed right before the output, only planar layout makes sense since the output tensor - // is always in a planar layout (ngraph limitation), so there is no reason to convert in nspc (per Chanel - // layout) and reorder to the planar one. + // is always in a planar layout (ngraph limitation), so there is no reason to convert in any other layout. 
bool hasOutputChild = false; for (auto& childEdge : getChildEdgesAtPort(0)) { if (Type::Output == childEdge->getChild()->getType()) { diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index d2b9ec78ae1a7f..0690d6bb446c45 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -589,10 +589,12 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) if (name == ov::optimal_number_of_infer_requests) { const auto streams = engConfig.streamExecutorConfig._streams; - return decltype(ov::optimal_number_of_infer_requests)::value_type(streams); // ov::optimal_number_of_infer_requests has no negative values + return decltype(ov::optimal_number_of_infer_requests)::value_type( + streams); // ov::optimal_number_of_infer_requests has no negative values } else if (name == ov::num_streams) { const auto streams = engConfig.streamExecutorConfig._streams; - return decltype(ov::num_streams)::value_type(streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2) + return decltype(ov::num_streams)::value_type( + streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2) } else if (name == ov::affinity) { const auto affinity = engConfig.streamExecutorConfig._threadBindingType; switch (affinity) { @@ -634,7 +636,7 @@ ov::Any Engine::get_property(const std::string& name, const ov::AnyMap& options) } else if (name == ov::hint::execution_mode) { return engConfig.executionMode; } - return get_metric(name, options); + return get_ro_property(name, options); } ov::Any Engine::get_metric_legacy(const std::string& name, const ov::AnyMap& options) const { @@ -693,7 +695,7 @@ ov::Any Engine::get_metric_legacy(const std::string& name, const ov::AnyMap& opt OPENVINO_SUPPRESS_DEPRECATED_END } -ov::Any Engine::get_metric(const std::string& name, const ov::AnyMap& options) const { +ov::Any Engine::get_ro_property(const std::string& name, const ov::AnyMap& options) const { if (is_legacy_api()) return get_metric_legacy(name, options); diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 1fc357a19a67a5..2b2031196a5825 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -60,7 +60,7 @@ class Engine : public ov::IPlugin { private: bool is_legacy_api() const; - ov::Any get_metric(const std::string& name, const ov::AnyMap& options) const; + ov::Any get_ro_property(const std::string& name, const ov::AnyMap& options) const; ov::Any get_metric_legacy(const std::string& name, const ov::AnyMap& options) const; ov::Any get_property_legacy(const std::string& name, const ov::AnyMap& options) const; diff --git a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp index 3ad38c019d1f02..13df4be15c0dcf 100644 --- a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp +++ b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp @@ -53,9 +53,6 @@ inline std::string get_port_name(const ov::Output& port, const b std::string name; // TODO: Should use tensor name as the port name, but many legacy tests still use legacy name // plus sometimes it will get empty tensor name. 
- if (!is_legacy_api) { - name = {}; - } if (name.empty()) { bool is_input = ov::op::util::is_parameter(port.get_node()); if (is_input) { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index a2fc7644c4c019..8f355cb0e2a9ca 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -225,7 +225,7 @@ std::vector disabledTestPatterns() { // Issue: 121313 R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f16.*)", R"(smoke_GroupConvBackpropData.*paddingDefined/GroupConvBackpropLayerTest.Inference.*f32.*)", - // CPU Plugin API 2.0, convert node will use planar layout if its output is result node, skip nhwc/nChw8c/nChw16c test cases + // Issue: 121812 R"(.*ConvertCPULayerTest.*outFmts=(nhwc|nChw8c|nChw16c).*)", }; #if defined(__APPLE__) && defined(OPENVINO_ARCH_ARM64) diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp index e4f4b9c960335f..5067ed23c1c6c0 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.cpp @@ -219,10 +219,8 @@ void CPUTestsBase::CheckPluginRelatedResultsImpl(const std::shared_ptr