diff --git a/src/bindings/python/tests/test_runtime/test_core.py b/src/bindings/python/tests/test_runtime/test_core.py
index a864f73228106b..33540294117909 100644
--- a/src/bindings/python/tests/test_runtime/test_core.py
+++ b/src/bindings/python/tests/test_runtime/test_core.py
@@ -65,7 +65,8 @@ def test_core_class(device):
 
     input_tensor = Tensor(input_data)
     results = request.infer({"data": input_tensor})
-    assert np.allclose(results[list(results)[0]], expected_output)
+    # a Convert node may be introduced by API 2.0, which brings some deviation
+    assert np.allclose(results[list(results)[0]], expected_output, 1e-4, 1e-4)
 
 
 # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request
diff --git a/src/bindings/python/tests/test_runtime/test_infer_request.py b/src/bindings/python/tests/test_runtime/test_infer_request.py
index 85e4296f691081..8b04e915a8923d 100644
--- a/src/bindings/python/tests/test_runtime/test_infer_request.py
+++ b/src/bindings/python/tests/test_runtime/test_infer_request.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
+import platform
 
 from collections.abc import Iterable
 from copy import deepcopy
@@ -97,7 +98,12 @@ def abs_model_with_data(device, ov_type, numpy_dtype):
 
 
 def test_get_profiling_info(device):
     core = Core()
-    param = ops.parameter([1, 3, 32, 32], np.float32, name="data")
+    if platform.system() == "Darwin" and platform.machine() == "arm64":
+        # arm64 prefers fp16, and an fp32 input will trigger a Convert node
+        # to be added, so the assert on 'Softmax' will fail.
+        param = ops.parameter([1, 3, 32, 32], np.float16, name="data")
+    else:
+        param = ops.parameter([1, 3, 32, 32], np.float32, name="data")
     softmax = ops.softmax(param, 1, name="fc_out")
     model = Model([softmax], [param], "test_model")
diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
index 95bbe6cb1c7308..e9558163fe8b28 100644
--- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
+++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -294,9 +294,12 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
         };
         type_to_fuse_map empty_fuse_map = {};
         const bool keep_precision_sensitive_in_fp32 = true;
-        CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConvertPrecision, fp_convert_precision_map,
-                                 empty_fuse_map,
-                                 keep_precision_sensitive_in_fp32);
+        CPU_REGISTER_PASS_COMMON(manager,
+                                 ov::pass::ConvertPrecision,
+                                 fp_convert_precision_map,
+                                 empty_fuse_map,
+                                 keep_precision_sensitive_in_fp32,
+                                 false);
     }
 #endif
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::KeepConstAndDecompression);
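
For context on the test_core.py change: np.allclose takes rtol and atol as its
third and fourth positional arguments, so the new assert relaxes both
tolerances to 1e-4. A minimal sketch of why the default tolerances are too
strict once an fp16 round-trip is involved (the deviation value below is
illustrative, not measured):

    import numpy as np

    expected = np.array([0.5, 1.0, 2.0], dtype=np.float32)
    # Simulate a small fp16-induced deviation of ~5e-5 relative error.
    actual = expected * (1.0 + 5e-5)

    # Default tolerances (rtol=1e-5, atol=1e-8) reject the result...
    assert not np.allclose(actual, expected)
    # ...while the relaxed positional rtol/atol of 1e-4 accept it.
    assert np.allclose(actual, expected, 1e-4, 1e-4)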
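
For context on the transformation_pipeline.cpp change: the trailing false
plausibly maps to ConvertPrecision's convert_input_output_precision parameter,
so the model keeps its original fp32 inputs/outputs and the plugin inserts an
explicit Convert node instead, which is what both test updates accommodate.
A hedged sketch of the observable effect through the Python API (ov and ops
are the usual openvino.runtime aliases; the fp16 preference is an assumption
about targets such as Apple Silicon):

    import openvino.runtime as ov
    import openvino.runtime.opset8 as ops

    core = ov.Core()
    param = ops.parameter([1, 3, 32, 32], ov.Type.f32, name="data")
    model = ov.Model([ops.softmax(param, 1)], [param], "test_model")
    compiled = core.compile_model(model, "CPU")
    # On an fp16-preferring CPU the executed graph keeps the fp32 "data"
    # input and adds a Convert (f32 -> f16) right after it, so profiling
    # info reports a Convert entry alongside Softmax.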