From 7666e39cf30d53f8b2e93bc31558b7b5624cf43c Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Wed, 6 Nov 2024 09:40:06 +0400 Subject: [PATCH 001/182] [GPU] Disable crop fusing (#27417) ### Details: - *Disable crop fusing that was enabled in the https://github.com/openvinotoolkit/openvino/pull/26951* ### Tickets: - *[156367](https://jira.devtools.intel.com/browse/CVS-156367)* --- .../graph/graph_optimizer/prepare_primitive_fusing.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index 39cbc1aa89b4e2..c323109850c489 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -736,8 +736,6 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) { should_fuse |= input.is_type(); - should_fuse |= input.is_type(); - bool legacy_fusion = activation_node.get_dependencies().size() == 1 && !input.can_be_optimized() && !activation_node.is_constant() && @@ -922,8 +920,7 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) { (parents[i].first->is_type()) || (parents[i].first->is_type() && reduce_supports_fusings(parents[i].first->as())) || - (parents[i].first->is_type()) || - (parents[i].first->is_type()); + (parents[i].first->is_type()); } // Disable fusion to a node on constant path when second input is in data flow @@ -1063,6 +1060,9 @@ void prepare_primitive_fusing::fuse_simple_primitives(program &p) { fused_node->get_input_pshape().rbegin()->is_dynamic(); if (is_fc_lora || is_conv_lora || is_gemm_lora) { + if (!can_fuse_parents[peer_idx]) { + return; + } std::swap(peer_node, fused_node); } } From 8a3277fa7121509148a7a03c19e53cae1b1712d4 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Wed, 6 Nov 2024 09:03:50 +0100 Subject: [PATCH 002/182] [GHA] Changed android runner (#27414) ### Details: - Increased RAM ### Tickets: - *156361* --------- Co-authored-by: Alina Kladieva --- .github/workflows/android_x64.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/android_x64.yml b/.github/workflows/android_x64.yml index 1cdb2023784979..47ea6d2cd90ffe 100644 --- a/.github/workflows/android_x64.yml +++ b/.github/workflows/android_x64.yml @@ -73,7 +73,7 @@ jobs: defaults: run: shell: bash - runs-on: aks-linux-16-cores-32gb + runs-on: aks-linux-16-cores-64gb container: image: ${{ fromJSON(needs.docker.outputs.images).ov_build.ubuntu_22_04_android }} volumes: From cd15be95e3bb2b3581661180a851aaeeb4977642 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Wed, 6 Nov 2024 09:43:46 +0100 Subject: [PATCH 003/182] [PyOV] Test for export-import via file (#27320) ### Details: - add a test where exported model saved to file and then imported from file - fix a problem with `inc_ref` and `dec_ref` when python callbacks are passed ### Tickets: - *ticket-id* --- .../python/src/pyopenvino/utils/utils.cpp | 23 +++++++--- .../python/tests/test_graph/test_custom_op.py | 4 +- .../python/tests/test_graph/test_manager.py | 6 +-- .../tests/test_runtime/test_compiled_model.py | 43 ++++++++++++++++++- .../python/tests/test_runtime/test_core.py | 16 +++---- .../python/tests/test_runtime/test_model.py | 6 +-- .../test_transformations/test_compression.py | 4 +- .../test_transformations/test_offline_api.py | 30 ++++++------- .../test_public_transformations.py | 10 
++--- src/bindings/python/tests/utils/helpers.py | 13 ++++-- 10 files changed, 105 insertions(+), 50 deletions(-) diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index e165c2e00b4808..c747e2d3b81166 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -258,20 +258,31 @@ std::map properties_to_any_map(const std::map(property_value)) { OPENVINO_THROW("The value type of ov::cache_encryption_callbacks property is expected list"); } + auto property_list = property_value.cast(); + // Wrapped to sp due-to we need to hold GIL upon destruction of python function + auto py_encrypt = std::shared_ptr(new py::function(std::move(property_list[0])), + [](py::function* py_encrypt) { + py::gil_scoped_acquire acquire; + delete py_encrypt; + }); + auto py_decrypt = std::shared_ptr(new py::function(std::move(property_list[1])), + [](py::function* py_decrypt) { + py::gil_scoped_acquire acquire; + delete py_decrypt; + }); + std::function encrypt_func = - [property_value](const std::string& in_str) -> std::string { + [py_encrypt](const std::string& in_str) -> std::string { // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; - auto _list = property_value.cast(); - return _list[0](in_str).cast(); + return (*py_encrypt)(in_str).cast(); }; std::function decrypt_func = - [property_value](const std::string& in_str) -> std::string { + [py_decrypt](const std::string& in_str) -> std::string { // Acquire GIL, execute Python function py::gil_scoped_acquire acquire; - auto _list = property_value.cast(); - return _list[1](in_str).cast(); + return (*py_decrypt)(in_str).cast(); }; ov::EncryptionCallbacks encryption_callbacks{encrypt_func, decrypt_func}; properties_to_cpp[property.first] = encryption_callbacks; diff --git a/src/bindings/python/tests/test_graph/test_custom_op.py b/src/bindings/python/tests/test_graph/test_custom_op.py index 5a2e33c7ee5783..8643844e2c54fd 100644 --- a/src/bindings/python/tests/test_graph/test_custom_op.py +++ b/src/bindings/python/tests/test_graph/test_custom_op.py @@ -12,7 +12,7 @@ from openvino.runtime import DiscreteTypeInfo import openvino.runtime.opset14 as ops -from tests.utils.helpers import create_filename_for_test +from tests.utils.helpers import create_filenames_for_ir class CustomOp(Op): @@ -108,7 +108,7 @@ def visit_attributes(self, visitor): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request @pytest.fixture def prepared_paths(request, tmp_path): - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) yield xml_path, bin_path diff --git a/src/bindings/python/tests/test_graph/test_manager.py b/src/bindings/python/tests/test_graph/test_manager.py index b5fce8cc09b8d7..ff72ef43158d6e 100644 --- a/src/bindings/python/tests/test_graph/test_manager.py +++ b/src/bindings/python/tests/test_graph/test_manager.py @@ -12,7 +12,7 @@ from openvino.runtime.passes import Manager, Serialize, ConstantFolding, Version from tests.test_graph.util import count_ops_of_type -from tests.utils.helpers import create_filename_for_test, compare_models +from tests.utils.helpers import create_filenames_for_ir, compare_models def create_model(): @@ -51,7 +51,7 @@ def test_constant_folding(): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request @pytest.fixture def prepare_ir_paths(request, tmp_path): - xml_path, 
bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) yield xml_path, bin_path @@ -138,7 +138,7 @@ def test_serialize_pass_mixed_args_kwargs_v2(prepare_ir_paths): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_serialize_pass_wrong_num_of_args(request, tmp_path): - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) pass_manager = Manager() with pytest.raises(TypeError) as e: diff --git a/src/bindings/python/tests/test_runtime/test_compiled_model.py b/src/bindings/python/tests/test_runtime/test_compiled_model.py index d6909fa94711d3..9e17497fc22081 100644 --- a/src/bindings/python/tests/test_runtime/test_compiled_model.py +++ b/src/bindings/python/tests/test_runtime/test_compiled_model.py @@ -2,6 +2,7 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import os import pytest import numpy as np @@ -14,6 +15,7 @@ generate_relu_compiled_model_with_config, encrypt_base64, decrypt_base64, + create_filenames_for_ir, create_filename_for_test) from openvino import Model, Shape, Core, Tensor, serialize from openvino.runtime import ConstOutput @@ -97,6 +99,43 @@ def test_export_import_advanced(device): assert np.argmax(res[new_compiled.outputs[0]]) == 531 +# request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request +@pytest.fixture +def prepare_blob_path(request, tmp_path): + filename = create_filename_for_test(request.node.name) + path_to_blob = tmp_path / str(filename + ".blob") + yield path_to_blob + + os.remove(path_to_blob) + + +def test_export_import_via_file(prepare_blob_path, device): + import io + + core = Core() + + if props.device.Capability.EXPORT_IMPORT not in core.get_property(device, props.device.capabilities): + pytest.skip(f"{core.get_property(device, props.device.full_name)} plugin due-to export, import model API isn't implemented.") + + compiled_model = generate_relu_compiled_model(device) + + user_stream = io.BytesIO() + + compiled_model.export_model(user_stream) + path_to_blob = prepare_blob_path + + with open(path_to_blob, "wb") as f_w: + f_w.write(user_stream.getbuffer()) + + with open(path_to_blob, "rb") as f_r: + new_compiled = core.import_model(f_r.read(), device) + + img = generate_image() + res = new_compiled.infer_new_request({"data": img}) + + assert np.argmax(res[new_compiled.outputs[0]]) == 531 + + @pytest.mark.parametrize("input_arguments", [[0], ["data"], []]) def test_get_input(device, input_arguments): compiled_model = generate_relu_compiled_model(device) @@ -250,7 +289,7 @@ def test_direct_infer(device, shared_flag): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_compiled_model_after_core_destroyed(request, tmp_path, device): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) model = get_relu_model() serialize(model, xml_path, bin_path) with open(bin_path, "rb") as f: @@ -267,7 +306,7 @@ def test_compiled_model_after_core_destroyed(request, tmp_path, device): def test_compiled_model_from_buffer_in_memory(request, tmp_path, device): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) model = get_relu_model() serialize(model, 
xml_path, bin_path) with open(bin_path, "rb") as f: diff --git a/src/bindings/python/tests/test_runtime/test_core.py b/src/bindings/python/tests/test_runtime/test_core.py index 8c15c2a32a05c5..d147ce2d6bcab2 100644 --- a/src/bindings/python/tests/test_runtime/test_core.py +++ b/src/bindings/python/tests/test_runtime/test_core.py @@ -28,7 +28,7 @@ get_relu_model, plugins_path, compare_models, - create_filename_for_test, + create_filenames_for_ir, get_model_with_template_extension, ) @@ -77,7 +77,7 @@ def test_core_class(device): ]) def test_compile_model(request, tmp_path, device_name): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) relu_model = get_relu_model() serialize(relu_model, xml_path, bin_path) model = core.read_model(model=xml_path, weights=bin_path) @@ -97,7 +97,7 @@ def get_model(): @pytest.fixture def get_model_path(request, tmp_path): - xml_path, _ = create_filename_for_test(request.node.name, tmp_path, True) + xml_path, _ = create_filenames_for_ir(request.node.name, tmp_path, True) serialize(get_relu_model(), xml_path) return Path(xml_path) @@ -130,7 +130,7 @@ def test_compact_api(model_type, device_name, config, request): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_from_ir(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) relu_model = get_relu_model() serialize(relu_model, xml_path, bin_path) model = core.read_model(model=xml_path, weights=bin_path) @@ -143,7 +143,7 @@ def test_read_model_from_ir(request, tmp_path): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_from_tensor(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path, is_xml_path=True, is_bin_path=True) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path, is_xml_path=True, is_bin_path=True) relu_model = get_relu_model() serialize(relu_model, xml_path, bin_path) arr = np.ones(shape=(10), dtype=np.int8) @@ -164,7 +164,7 @@ def test_read_model_with_wrong_input(): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_as_path(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path, True, True) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path, True, True) relu_model = get_relu_model() serialize(relu_model, xml_path, bin_path) @@ -181,7 +181,7 @@ def test_read_model_as_path(request, tmp_path): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_read_model_from_buffer(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) relu_model = get_relu_model() serialize(relu_model, xml_path, bin_path) with open(bin_path, "rb") as f: @@ -195,7 +195,7 @@ def test_read_model_from_buffer(request, tmp_path): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_model_from_buffer_valid(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) relu_model = 
get_relu_model() serialize(relu_model, xml_path, bin_path) with open(bin_path, "rb") as f: diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 62b1eac9da3865..0ae592b2d1dff5 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -30,7 +30,7 @@ from tests.utils.helpers import ( generate_add_model, generate_model_with_memory, - create_filename_for_test, + create_filenames_for_ir, ) @@ -618,7 +618,7 @@ def check_rt_info(model): assert model.get_rt_info(["optimization", "test"]) core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) input_shape = PartialShape([1]) param = ops.parameter(input_shape, dtype=np.float32, name="data") relu1 = ops.relu(param, name="relu1") @@ -701,7 +701,7 @@ def check_rt_info(model): assert rt_info_val in ["float_empty", "nodes", "type", "directed"] core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) input_shape = PartialShape([1]) param = ops.parameter(input_shape, dtype=np.float32, name="data") relu1 = ops.relu(param, name="relu1") diff --git a/src/bindings/python/tests/test_transformations/test_compression.py b/src/bindings/python/tests/test_transformations/test_compression.py index fa46b6d227f1e3..d2754fd29d9c70 100644 --- a/src/bindings/python/tests/test_transformations/test_compression.py +++ b/src/bindings/python/tests/test_transformations/test_compression.py @@ -9,7 +9,7 @@ from openvino.runtime.opset13 import add, multiply import openvino as ov -from tests.utils.helpers import create_filename_for_test +from tests.utils.helpers import create_filenames_for_ir def make_constant(values, transposed): @@ -38,7 +38,7 @@ def make_model(add_consts, mul_consts): def get_constants(model, request, tmp_path) -> List[Constant]: - model_fname, _ = create_filename_for_test(request.node.name, tmp_path) + model_fname, _ = create_filenames_for_ir(request.node.name, tmp_path) ov.save_model(model, model_fname) core = ov.Core() restored_model = core.read_model(model_fname) diff --git a/src/bindings/python/tests/test_transformations/test_offline_api.py b/src/bindings/python/tests/test_transformations/test_offline_api.py index cd336493b58246..e265cef4635988 100644 --- a/src/bindings/python/tests/test_transformations/test_offline_api.py +++ b/src/bindings/python/tests/test_transformations/test_offline_api.py @@ -18,7 +18,7 @@ from openvino import Model, PartialShape, Core, serialize, save_model import openvino.runtime as ov -from tests.utils.helpers import create_filename_for_test, compare_models, _compare_models +from tests.utils.helpers import create_filenames_for_ir, compare_models, _compare_models def get_relu_model(): @@ -171,10 +171,10 @@ def test_fused_names_cleanup(): def prepare_test_model_for_serialize(request, tmp_path, is_path_xml, is_path_bin): - xml_path, bin_path = create_filename_for_test(request.node.name, - tmp_path, - is_path_xml, - is_path_bin) + xml_path, bin_path = create_filenames_for_ir(request.node.name, + tmp_path, + is_path_xml, + is_path_bin) shape = [100, 100, 2] parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A") parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B") @@ -267,10 +267,10 @@ def test_compress_model_transformation(): ) def 
test_version_default(request, tmp_path, is_path_xml, is_path_bin): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, - tmp_path, - is_path_xml, - is_path_bin) + xml_path, bin_path = create_filenames_for_ir(request.node.name, + tmp_path, + is_path_xml, + is_path_bin) shape = [100, 100, 2] parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A") parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B") @@ -297,10 +297,10 @@ def test_version_default(request, tmp_path, is_path_xml, is_path_bin): ], ) def test_serialize_default_bin(request, tmp_path, is_path_xml, is_path_bin): - xml_path, bin_path = create_filename_for_test(request.node.name, - tmp_path, - is_path_xml, - is_path_bin) + xml_path, bin_path = create_filenames_for_ir(request.node.name, + tmp_path, + is_path_xml, + is_path_bin) model = get_relu_model() serialize(model, xml_path) assert os.path.exists(bin_path) @@ -311,7 +311,7 @@ def test_serialize_default_bin(request, tmp_path, is_path_xml, is_path_bin): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_version_ir_v10(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) shape = [100, 100, 2] parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A") parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B") @@ -332,7 +332,7 @@ def test_version_ir_v10(request, tmp_path): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request def test_version_ir_v11(request, tmp_path): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) + xml_path, bin_path = create_filenames_for_ir(request.node.name, tmp_path) shape = [100, 100, 2] parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A") parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B") diff --git a/src/bindings/python/tests/test_transformations/test_public_transformations.py b/src/bindings/python/tests/test_transformations/test_public_transformations.py index 429bc6c192acc4..a10fea786b9770 100644 --- a/src/bindings/python/tests/test_transformations/test_public_transformations.py +++ b/src/bindings/python/tests/test_transformations/test_public_transformations.py @@ -17,7 +17,7 @@ ) from tests.test_transformations.utils.utils import count_ops, get_relu_model -from tests.utils.helpers import create_filename_for_test, compare_models +from tests.utils.helpers import create_filenames_for_ir, compare_models def get_model(): @@ -132,10 +132,10 @@ def test_low_latency2(): ) def test_serialize_pass(request, tmp_path, is_path_xml, is_path_bin): core = Core() - xml_path, bin_path = create_filename_for_test(request.node.name, - tmp_path, - is_path_xml, - is_path_bin) + xml_path, bin_path = create_filenames_for_ir(request.node.name, + tmp_path, + is_path_xml, + is_path_bin) model = get_relu_model() diff --git a/src/bindings/python/tests/utils/helpers.py b/src/bindings/python/tests/utils/helpers.py index 2ea00484e9840c..c14bd6e5e779da 100644 --- a/src/bindings/python/tests/utils/helpers.py +++ b/src/bindings/python/tests/utils/helpers.py @@ -303,7 +303,14 @@ def generate_abs_compiled_model_with_data(device, ov_type, numpy_dtype): return compiled_model, request, tensor1, array1 -def create_filename_for_test(test_name, tmp_path, is_xml_path=False, is_bin_path=False): +def create_filename_for_test(test_name): + python_version = 
str(sys.version_info.major) + "_" + str(sys.version_info.minor) + filename = test_name.replace("test_", "").replace("[", "_").replace("]", "_") + filename = filename + "_" + python_version + return filename + + +def create_filenames_for_ir(test_name, tmp_path, is_xml_path=False, is_bin_path=False): """Return a tuple with automatically generated paths for xml and bin files. :param test_name: Name used in generating. @@ -311,9 +318,7 @@ def create_filename_for_test(test_name, tmp_path, is_xml_path=False, is_bin_path :param is_bin_path: True if bin file should be pathlib.Path object, otherwise return string. :return: Tuple with two objects representing xml and bin files. """ - python_version = str(sys.version_info.major) + "_" + str(sys.version_info.minor) - filename = test_name.replace("test_", "").replace("[", "_").replace("]", "_") - filename = filename + "_" + python_version + filename = create_filename_for_test(test_name) path_to_xml = tmp_path / Path(filename + ".xml") path_to_bin = tmp_path / Path(filename + ".bin") _xml = path_to_xml if is_xml_path else str(path_to_xml) From 362ebe99aa07549976d38daec17d8ace4041f69f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 09:10:05 +0000 Subject: [PATCH 004/182] Bump codecov/codecov-action from 4.5.0 to 4.6.0 (#26882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.5.0 to 4.6.0.
Release notes

Sourced from codecov/codecov-action's releases.

v4.6.0

What's Changed

... (truncated)
Commits

- b9fd7d1 chore(release):4.6.0 (#1587)
- 6f7612c fix: bump eslint parser deps (#1586)
- 26c7e28 build(deps): bump actions/checkout from 4.1.7 to 4.2.0 (#1583)
- 6f744f7 build(deps): bump github/codeql-action from 3.26.8 to 3.26.9 (#1584)
- 543c3d4 chore: fix typo of OSS (#1578)
- e379426 build(deps-dev): bump @vercel/ncc from 0.38.1 to 0.38.2 (#1577)
- 42656e4 build(deps): bump github/codeql-action from 3.26.7 to 3.26.8 (#1575)
- 2296b6b build(deps-dev): bump eslint from 8.57.0 to 8.57.1 (#1571)
- bd77bc3 build(deps): bump github/codeql-action from 3.26.6 to 3.26.7 (#1569)
- 180b964 build(deps-dev): bump @types/jest from 29.5.12 to 29.5.13 (#1567)
- Additional commits viewable in compare view
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=codecov/codecov-action&package-manager=github_actions&previous-version=4.5.0&new-version=4.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

You can trigger a rebase of this PR by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index db5ba3de1a3c85..6cb0b2c5b6233c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -138,6 +138,6 @@ jobs: lcov --capture --directory ${{ github.workspace }}/. --output-file coverage.info genhtml coverage.info --output-directory coverage-report - name: Collect coverage - uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 + uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 with: verbose: true From b6fe65f01ad71c9298fe658da5978371ffbc2e3c Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Wed, 6 Nov 2024 13:12:15 +0400 Subject: [PATCH 005/182] [Snippets] Fixed LoopManager::update_loop_ports (#27300) ### Details: - *To remind, `LoopPort` is expression port connected to another expression port which is not in the same Loop. It's like entry (of exit) point of the Loop. It means that some expression port cannot be port of the Loop if all consumers (or sources) are from the same Loop. However, the method `LoopManager::update_loop_ports` sometimes creates these situation. This PR fixes this method. The screenshot below describes this situation: red loop is inner loop and blue loops is outer loop. However, some of output ports of this Loop is inside (green question sign) - invalid situation which is fixed by these changes.* image - *Added the corresponding checks to validate pass* - *Remove parts in `init_is_incremented` which handle invalid case by `is_incremented=false`.* ### Tickets: - *CVS-156299* --- .../snippets/lowered/pass/init_loops.hpp | 2 +- .../lowered/pass/validate_unified_loops.hpp | 7 +++ .../include/snippets/utils/loop_utils.hpp | 6 ++ .../snippets/src/lowered/loop_manager.cpp | 52 ++++++++++++------ .../snippets/src/lowered/pass/init_loops.cpp | 50 ++--------------- .../lowered/pass/validate_unified_loops.cpp | 55 +++++++++++++++---- src/common/snippets/src/utils/loop_utils.cpp | 9 +++ 7 files changed, 107 insertions(+), 74 deletions(-) diff --git a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp index 169dbd30e35cc2..e94e1977974716 100644 --- a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp @@ -25,7 +25,7 @@ class InitLoops : public Pass { bool run(LinearIR& linear_ir) override; private: - static void update_compile_parameters(const UnifiedLoopInfoPtr& loop_info, size_t loop_id); + static void update_compile_parameters(const UnifiedLoopInfoPtr& loop_info); }; } // namespace pass diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp index 80c1b7be19d1f0..d78aaaa668363e 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp @@ -6,6 +6,9 @@ #include "pass.hpp" +#include "snippets/lowered/loop_manager.hpp" + + namespace ov { namespace snippets { namespace lowered { @@ -27,6 +30,10 @@ class 
ValidateUnifiedLoops : public Pass { OPENVINO_RTTI("ValidateUnifiedLoops", "Pass") ValidateUnifiedLoops() = default; bool run(LinearIR& linear_ir) override; + +private: + static void validate_loop_infos(const LoopManagerPtr& loop_manager); + static void validate_loop_port_presence(const LinearIR& linear_ir); }; } // namespace pass diff --git a/src/common/snippets/include/snippets/utils/loop_utils.hpp b/src/common/snippets/include/snippets/utils/loop_utils.hpp index b0597008adc1c4..c9f9412ff5c574 100644 --- a/src/common/snippets/include/snippets/utils/loop_utils.hpp +++ b/src/common/snippets/include/snippets/utils/loop_utils.hpp @@ -21,6 +21,12 @@ void update_data_pointer_shifts(const ov::snippets::lowered::UnifiedLoopInfoPtr& * @brief Updates work amount and updates data pointer shifts of the provided "loop_info" */ void update_runtime_parameters(const ov::snippets::lowered::UnifiedLoopInfoPtr& loop_info); +/** + * @brief Check if the passed expression port should be port of the Loop with ID `loop_id`: + * the target expression port should be connected to an expression from another Loop (missed in the loop with ID `loop_id`), + */ +bool should_be_loop_port(const ov::snippets::lowered::ExpressionPort& port, size_t loop_id); + } // namespace utils } // namespace snippets } // namespace ov \ No newline at end of file diff --git a/src/common/snippets/src/lowered/loop_manager.cpp b/src/common/snippets/src/lowered/loop_manager.cpp index 21f4ecc83c57b0..f0e5306c5878bc 100644 --- a/src/common/snippets/src/lowered/loop_manager.cpp +++ b/src/common/snippets/src/lowered/loop_manager.cpp @@ -11,6 +11,7 @@ #include "openvino/core/graph_util.hpp" #include "openvino/core/type.hpp" +#include "snippets/utils/loop_utils.hpp" #include "snippets/itt.hpp" @@ -349,30 +350,45 @@ void LoopManager::fuse_loop_ports(std::vector& output_ports, } void LoopManager::update_loop_ports(const ExpressionPtr& expr) { - auto output_ports = expr->get_output_ports(); - for (size_t i = 0; i < expr->get_input_count(); ++i) { - const auto& source = expr->get_input_port_connector(i)->get_source(); - const auto common_outer_loop_ids = get_common_outer_loops(expr, source.get_expr()); - // The source output port can have several consumers (including the current expr) that can be potential output ports - // So we should verify on the possible future output ports - size_t count_of_common_outer_loops = common_outer_loop_ids.size(); - for (const auto& source_consumer : source.get_connected_ports()) { - if (source_consumer.get_expr() == expr) + auto update_ports = [&](const ov::snippets::lowered::ExpressionPort& connected_port) { + const auto is_output = connected_port.get_type() == ExpressionPort::Output; + // Iterate through all Loops of the connected expression + for (const auto& loop_id : connected_port.get_expr()->get_loop_ids()) { + const auto& loop_info = get_loop_info(loop_id); + // If the connected expression port is not Loop port - nothing to update + // If the target expression is not from the same Loop - nothing to update + if (!loop_info->is_loop_port(connected_port) || !is_loop_id_found(expr, loop_id)) continue; - count_of_common_outer_loops = std::min(count_of_common_outer_loops, get_common_outer_loops(source.get_expr(), source_consumer.get_expr()).size()); - } - replace_loop_ports({common_outer_loop_ids.cbegin(), common_outer_loop_ids.cbegin() + count_of_common_outer_loops}, source, output_ports); - // Save previous port - if (count_of_common_outer_loops != common_outer_loop_ids.size()) { - 
output_ports.insert(output_ports.begin(), source); - replace_loop_ports({common_outer_loop_ids.cbegin() + count_of_common_outer_loops, common_outer_loop_ids.cend()}, source, output_ports); + + std::vector new_ports; + // Check if some ports of target expression must be Loop port + const auto target_expr_ports = is_output ? expr->get_output_ports() : expr->get_input_ports(); + for (const auto& port : target_expr_ports) { + if (utils::should_be_loop_port(port, loop_id)) + new_ports.push_back(port); + } + // Leave the connected expression port as Loop port if needed + if (utils::should_be_loop_port(connected_port, loop_id)) + new_ports.push_back(connected_port); + + // Nothing should be updated + if (new_ports.size() == 1 && new_ports.front() == connected_port) + continue; + + loop_info->replace_with_new_ports(connected_port, new_ports); } + }; + + // The case with parent loops: source -> target expr + for (size_t i = 0; i < expr->get_input_count(); ++i) { + update_ports(expr->get_input_port_connector(i)->get_source()); } - const auto input_ports = expr->get_input_ports(); + + // The case with child loops: target expr -> consumers for (size_t i = 0; i < expr->get_output_count(); ++i) { const auto& consumers = expr->get_output_port_connector(i)->get_consumers(); for (const auto& consumer : consumers) { - replace_loop_ports(get_common_outer_loops(expr, consumer.get_expr()), consumer, input_ports); + update_ports(consumer); } } } diff --git a/src/common/snippets/src/lowered/pass/init_loops.cpp b/src/common/snippets/src/lowered/pass/init_loops.cpp index aa7d0ab042e1a4..69d336094f1a14 100644 --- a/src/common/snippets/src/lowered/pass/init_loops.cpp +++ b/src/common/snippets/src/lowered/pass/init_loops.cpp @@ -16,46 +16,10 @@ namespace lowered { namespace pass { namespace { -inline void init_is_incremented(LoopPort& port, size_t loop_id) { +inline void init_is_incremented(LoopPort& port) { const auto& expr = port.expr_port->get_expr(); - const auto& expr_loops = expr->get_loop_ids(); if (!std::dynamic_pointer_cast(expr->get_node())) { port.is_incremented = false; - } else if (expr_loops.back() != loop_id) { - // Note: LoopPort connected to Buffer between two loops should not be incremented in the outermost loop - // Consider the example below: - // Store; Loop ids [0,1,2,3] - // Buffer; Loop ids [0,1] - // Load; Loop ids [0,1,4,5] - // Store is output port of Loop-1, but it should be incremented only in Loop-2 and Loop-3. Similar with Load. 
- auto is_ignored = [=](const ExpressionPtr& target_expr) { - if (ov::is_type(target_expr)) { - const auto& target_loops = target_expr->get_loop_ids(); - const auto i_max = std::min(expr_loops.size(), target_loops.size()); - for (size_t i = 0; i < i_max && expr_loops[i] == target_loops[i]; i++) { - if (target_loops[i] == loop_id) - return true; - } - } - return false; - }; - if (port.expr_port->get_type() == ExpressionPort::Type::Output) { - const auto& out_connector = expr->get_output_port_connector(port.expr_port->get_index()); - for (const auto& consumer : out_connector->get_consumers()) { - if (is_ignored(consumer.get_expr())) { - port.is_incremented = false; - return; - } - } - } else if (port.expr_port->get_type() == ExpressionPort::Type::Input) { - const auto& in_connector = expr->get_input_port_connector(port.expr_port->get_index()); - if (is_ignored(in_connector->get_source().get_expr())) { - port.is_incremented = false; - return; - } - } else { - OPENVINO_THROW("Unexpected LoopPort type"); - } } } @@ -71,11 +35,11 @@ inline int64_t get_data_size(const LoopPort& loop_port) { } } // namespace -void InitLoops::update_compile_parameters(const UnifiedLoopInfoPtr& loop_info, size_t loop_id) { +void InitLoops::update_compile_parameters(const UnifiedLoopInfoPtr& loop_info) { OPENVINO_ASSERT(loop_info != nullptr, "UnifiedLoopInfo is nullptr, nothing to update"); loop_info->iterate_through_infos( - [loop_id](LoopPort& loop_port, UnifiedLoopInfo::LoopPortDesc& ptr_shifts_params) { - init_is_incremented(loop_port, loop_id); + [](LoopPort& loop_port, UnifiedLoopInfo::LoopPortDesc& ptr_shifts_params) { + init_is_incremented(loop_port); ptr_shifts_params.data_size = get_data_size(loop_port); }); } @@ -85,12 +49,10 @@ bool InitLoops::run(LinearIR& linear_ir) { if (linear_ir.empty()) return false; - const auto& loop_manager = linear_ir.get_loop_manager(); - const auto& loops = loop_manager->get_map(); + const auto& loops = linear_ir.get_loop_manager()->get_map(); for (const auto& loop : loops) { - const auto& loop_id = loop.first; const auto& loop_info = ov::as_type_ptr(loop.second); - update_compile_parameters(loop_info, loop_id); + update_compile_parameters(loop_info); ov::snippets::utils::update_runtime_parameters(loop_info); } diff --git a/src/common/snippets/src/lowered/pass/validate_unified_loops.cpp b/src/common/snippets/src/lowered/pass/validate_unified_loops.cpp index bdfb8896405847..ec43f02d28792f 100644 --- a/src/common/snippets/src/lowered/pass/validate_unified_loops.cpp +++ b/src/common/snippets/src/lowered/pass/validate_unified_loops.cpp @@ -7,6 +7,7 @@ #include "snippets/itt.hpp" #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/loop_manager.hpp" +#include "snippets/utils/loop_utils.hpp" #include "snippets/utils/utils.hpp" namespace ov { @@ -14,14 +15,7 @@ namespace snippets { namespace lowered { namespace pass { -bool ValidateUnifiedLoops::run(LinearIR& linear_ir) { - OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::ValidateUnifiedLoops") - if (linear_ir.empty()) - return false; - - const auto& loop_manager = linear_ir.get_loop_manager(); - const auto& loops = loop_manager->get_map(); - +void ValidateUnifiedLoops::validate_loop_infos(const LoopManagerPtr& loop_manager) { // Already validated vectors of Loop IDs std::set> validated_nested_loops; auto is_already_verified = [&validated_nested_loops](const std::vector& ids) { @@ -66,10 +60,9 @@ bool ValidateUnifiedLoops::run(LinearIR& linear_ir) { validated_nested_loops.insert(loop_ids); }; - 
for (const auto& pair : loops) { + for (const auto& pair : loop_manager->get_map()) { const auto& loop_info = ov::as_type_ptr(pair.second); - OPENVINO_ASSERT(loop_info, - "ValidateUnifiedLoops expects only UnifiedLoopInfo in LoopManager"); + OPENVINO_ASSERT(loop_info, "ValidateUnifiedLoops expects only UnifiedLoopInfo in LoopManager"); loop_info->iterate_through_ports(validate_loop_port); // Validate that iteration dimnsion is broadcastable @@ -88,6 +81,46 @@ bool ValidateUnifiedLoops::run(LinearIR& linear_ir) { OPENVINO_ASSERT(unique_dimensions.size() <= 1, "Loop ports have incompatible dimensions, by which the loop iterates"); } +} + +void ValidateUnifiedLoops::validate_loop_port_presence(const LinearIR& linear_ir) { + auto validate_loop_port = [](const ExpressionPort& expr_port, const LoopInfoPtr& loop_info, size_t loop_id) { + if (utils::should_be_loop_port(expr_port, loop_id)) { + OPENVINO_ASSERT(loop_info->is_loop_port(expr_port), + "Expression port with idx ", expr_port.get_index(), " with node ", + expr_port.get_expr()->get_node()->get_friendly_name(), " is not Loop port but should be!"); + } else { + OPENVINO_ASSERT(!loop_info->is_loop_port(expr_port), + "Expression port with idx ", expr_port.get_index(), " with node ", + expr_port.get_expr()->get_node()->get_friendly_name(), " is Loop port but should not be!"); + } + }; + + const auto& loop_manager = linear_ir.get_loop_manager(); + for (const auto& expr : linear_ir) { + const auto& op = expr->get_node(); + if (ov::is_type(op)) + continue; + + for (const auto& loop_id : expr->get_loop_ids()) { + const auto& loop_info = loop_manager->get_loop_info(loop_id); + + for (size_t i = 0; i < expr->get_input_count(); ++i) + validate_loop_port(expr->get_input_port(i), loop_info, loop_id); + + for (size_t i = 0; i < expr->get_output_count(); ++i) + validate_loop_port(expr->get_output_port(i), loop_info, loop_id); + } + } +} + +bool ValidateUnifiedLoops::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::ValidateUnifiedLoops") + if (linear_ir.empty()) + return false; + + validate_loop_infos(linear_ir.get_loop_manager()); + validate_loop_port_presence(linear_ir); return true; } diff --git a/src/common/snippets/src/utils/loop_utils.cpp b/src/common/snippets/src/utils/loop_utils.cpp index dabd129fce451d..3d6b274c7613a8 100644 --- a/src/common/snippets/src/utils/loop_utils.cpp +++ b/src/common/snippets/src/utils/loop_utils.cpp @@ -82,6 +82,15 @@ void update_runtime_parameters(const UnifiedLoopInfoPtr& loop_info) { update_data_pointer_shifts(loop_info); } +bool should_be_loop_port(const ov::snippets::lowered::ExpressionPort& port, size_t loop_id) { + const auto& connected_ports = port.get_connected_ports(); + return std::any_of(connected_ports.cbegin(), connected_ports.cend(), + [&](const ExpressionPort& connected_port) { + const auto& loops = connected_port.get_expr()->get_loop_ids(); + return std::find(loops.cbegin(), loops.cend(), loop_id) == loops.cend(); + }); +} + } // namespace utils } // namespace snippets } // namespace ov \ No newline at end of file From d8befa1a5f6f803a711b356c5b0b2ea93f9ada53 Mon Sep 17 00:00:00 2001 From: Roman Lyamin Date: Wed, 6 Nov 2024 14:22:21 +0400 Subject: [PATCH 006/182] [GPU] Added convolution_gpu_b_fs_zyx_fsv16_imad shape agnostic kernel (#26404) ### Tickets: - *[148550](https://jira.devtools.intel.com/browse/CVS-148550)* --- .../src/graph/impls/ocl/convolution.cpp | 4 + .../src/graph/impls/ocl/convolution.hpp | 3 +- 
src/plugins/intel_gpu/src/graph/program.cpp | 25 +--- .../convolution_gpu_b_fs_zyx_fsv16_imad.cl | 7 +- .../intel_gpu/src/kernel_selector/jitter.cpp | 3 +- ...convolution_kernel_b_fs_zyx_fsv16_imad.cpp | 68 +++++++-- .../convolution_kernel_b_fs_zyx_fsv16_imad.h | 17 +-- .../convolution/convolution_kernel_base.h | 15 ++ .../unit/test_cases/convolution_gpu_test.cpp | 138 ++++++++++++++++++ 9 files changed, 233 insertions(+), 47 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp index e8043fa9fe90a9..cda7d8f1a4cedc 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp @@ -32,6 +32,10 @@ struct convolution_impl : typed_primitive_impl_ocl { if (is_dynamic()) { auto& kernel_selector = kernel_selector_t::Instance(); auto kernel_impl = kernel_selector.GetImplementation(_kernel_data.kernelName); + + const kernel_impl_params* impl_params = reinterpret_cast(ib.getKernelImplParams()); + _kernel_data.params = std::make_shared(get_kernel_params(*impl_params, true)); + kernel_impl->GetUpdateDispatchDataFunc(_kernel_data); } } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.hpp index 69ef9f0f8a2a7c..1771da5a5a63ba 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.hpp @@ -55,7 +55,8 @@ struct ConvolutionImplementationManager : public ImplementationManager { static const std::vector supported_dyn_formats = { format::bfyx, format::bfzyx, - format::b_fs_yx_fsv16 + format::b_fs_yx_fsv16, + format::b_fs_zyx_fsv16 }; if (!one_of(input_fmt.value, supported_dyn_formats) || !one_of(output_fmt.value, supported_dyn_formats)) diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 07fad4873659cd..7a66d32795c17c 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -1796,15 +1796,9 @@ void program::save(cldnn::BinaryOutputBuffer& ob) const { for (auto& impl_id : impl_ids) { std::string type_name = get_node_ptr(impl_id)->get_selected_impl()->m_manager->get_type_info().name; ob << type_name; - if (get_node_ptr(impl_id)->get_selected_impl()->is_onednn()) { - ob << true; - auto params = get_node_ptr(impl_id)->get_kernel_impl_params(); - ob.setKernelImplParams(params.get()); - ob << get_node_ptr(impl_id)->selected_impl; - } else { - ob << false; - ob << get_node_ptr(impl_id)->selected_impl; - } + auto params = get_node_ptr(impl_id)->get_kernel_impl_params(); + ob.setKernelImplParams(params.get()); + ob << get_node_ptr(impl_id)->selected_impl; ob << get_node_ptr(impl_id)->get_selected_impl()->get_cached_kernel_ids(kernels_cache); } } @@ -1930,15 +1924,10 @@ void program::load(cldnn::BinaryInputBuffer& ib) { ib >> type_name; ov::DiscreteTypeInfo type(type_name.c_str()); auto impl_manager = p_node.type()->get(type); - bool is_onednn; - ib >> is_onednn; - if (is_onednn) { - auto params = p_node.get_kernel_impl_params(); - ib.setKernelImplParams(params.get()); - ib >> p_node.selected_impl; - } else { - ib >> p_node.selected_impl; - } + + auto params = p_node.get_kernel_impl_params(); + ib.setKernelImplParams(params.get()); + ib >> p_node.selected_impl; p_node.selected_impl->m_manager = impl_manager.get(); diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_b_fs_zyx_fsv16_imad.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_b_fs_zyx_fsv16_imad.cl index fc5f2c18fe7efb..07486c1b9a1498 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_b_fs_zyx_fsv16_imad.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_b_fs_zyx_fsv16_imad.cl @@ -54,6 +54,7 @@ REQD_SUB_GROUP_SIZE(SIMD) __attribute__((reqd_work_group_size(1, 1, FEATURE_SLM_SPLIT * SIMD))) KERNEL(convolution_gpu_b_fs_zyx_fsv16_imad)( + OPTIONAL_SHAPE_INFO_ARG const __global INPUT0_TYPE *conv_input, __global OUTPUT_TYPE *output, const __global FILTER_TYPE *weights @@ -606,11 +607,15 @@ KERNEL(convolution_gpu_b_fs_zyx_fsv16_imad)( __attribute__((opencl_unroll_hint(OUT_BLOCK_WIDTH))) for (uint ow = 0; ow < OUT_BLOCK_WIDTH; ow++) { + #if !IS_DYNAMIC #if OUTPUT_SIZE_X % OUT_BLOCK_WIDTH != 0 if (out_x + OUT_BLOCK_WIDTH > OUTPUT_SIZE_X && ow >= OUTPUT_SIZE_X % OUT_BLOCK_WIDTH) break; #endif - + #else + if (OUTPUT_SIZE_X % OUT_BLOCK_WIDTH != 0 && out_x + OUT_BLOCK_WIDTH > OUTPUT_SIZE_X && ow >= OUTPUT_SIZE_X % OUT_BLOCK_WIDTH) + break; + #endif if (out_f_g < FILTER_OFM_NUM) { output[dst_index + ow * FSV + oh * OUTPUT_Y_PITCH * FSV + od * OUTPUT_Z_PITCH * FSV] = result[ofb][od][oh][ow]; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp index 33d13429fdcf3f..716e64937ec9e5 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/jitter.cpp @@ -364,7 +364,8 @@ JitDefinitions DataTensorJitConstant::GetDefinitions() const { if (_tensor.GetLayout() == DataLayout::bf || _tensor.GetLayout() == DataLayout::bfyx || _tensor.GetLayout() == DataLayout::bfzyx || _tensor.GetLayout() == DataLayout::bfwzyx || _tensor.GetLayout() == DataLayout::bfuwzyx || _tensor.GetLayout() == DataLayout::bfvuwzyx || - _tensor.GetLayout() == DataLayout::b_fs_yx_fsv16 || _tensor.GetLayout() == DataLayout::b_fs_yx_fsv32) { + _tensor.GetLayout() == DataLayout::b_fs_yx_fsv16 || _tensor.GetLayout() == DataLayout::b_fs_yx_fsv32 || + _tensor.GetLayout() == DataLayout::b_fs_zyx_fsv16) { definitions.push_back({_name + "_X_PITCH", "1"}); definitions.push_back({_name + "_Y_PITCH", dims_padded.x()}); definitions.push_back({_name + "_Z_PITCH", toVectorMulString({dims_padded.x(), dims_padded.y()})}); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp index 38ed9e42d3718f..352470913e4751 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.cpp @@ -55,7 +55,10 @@ namespace kernel_selector { Convolution_kernel_b_fs_zyx_fsv16_imad::BlockParams Convolution_kernel_b_fs_zyx_fsv16_imad::GetBlockParams(const convolution_params& params) const { - size_t max_block_width = getOutBlock_X(params.outputs[0].X().v, params.stride.x, params.filterSize.x, params.dilation.x); + size_t max_block_width = 1; + if (!params.outputs[0].X().is_dynamic) { + max_block_width = getOutBlock_X(params.outputs[0].X().v, params.stride.x, params.filterSize.x, params.dilation.x); + } size_t max_in_block_width = (max_block_width - 1) * params.stride.x + 
(params.filterSize.x - 1) * params.dilation.x + 1; size_t block_width = max_block_width; @@ -90,7 +93,9 @@ Convolution_kernel_b_fs_zyx_fsv16_imad::GetBlockParams(const convolution_params& size_t max_slm_split = params.engineInfo.maxWorkGroupSize / simd; // TGLU exceptions related to SLM usage - if (params.engineInfo.deviceType == dev_type::integrated_gpu && params.engineInfo.computeUnitsCount == 96) { + if (params.is_shape_agnostic) { + max_slm_split = 2; + } else if (params.engineInfo.deviceType == dev_type::integrated_gpu && params.engineInfo.computeUnitsCount == 96) { bool split_exception_1 = params.outputs[0].X().v == 3 && params.outputs[0].Y().v == 3 && params.outputs[0].Z().v == 1 && params.outputs[0].Feature().v == 512; bool split_exception_2 = params.outputs[0].X().v == 5 && params.outputs[0].Y().v == 5 && params.outputs[0].Z().v == 1 @@ -118,13 +123,16 @@ Convolution_kernel_b_fs_zyx_fsv16_imad::GetBlockParams(const convolution_params& } } + size_t max_d = params.outputs[0].Z().is_dynamic ? 1 : 16; + size_t max_h = params.outputs[0].Y().is_dynamic ? 1 : 16; + for (size_t split = 1; split <= max_slm_split; split *= 2) { for (size_t temp_block_features = simd; temp_block_features <= simd * 2; temp_block_features += simd) { - for (size_t d = 1; d < 16; ++d) { - if (params.outputs[0].Z().v % d) + for (size_t d = 1; d < max_d; ++d) { + if (d != 1 && params.outputs[0].Z().v % d) continue; - for (size_t h = 1; h < 16; ++h) { - if (params.outputs[0].Y().v % h) + for (size_t h = 1; h < max_h; ++h) { + if (h != 1 && params.outputs[0].Y().v % h) continue; bool c_ifm_mul = CeilDiv(params.weights.IFM().v, fsv) % split == 0; @@ -174,6 +182,10 @@ Convolution_kernel_b_fs_zyx_fsv16_imad::GetBlockParams(const convolution_params& } float Convolution_kernel_b_fs_zyx_fsv16_imad::EstimateBlockParamsRatio(const convolution_params& params, const BlockParams& block) const { + if (params.has_dynamic_outputs()) { + return -10.f; + } + float occupancy_by_logic_size = static_cast(params.outputs[0].LogicalSize() / static_cast(params.engineInfo.maxThreadsPerDevice)); bool increase_max_reg_pressure = occupancy_by_logic_size >= 595.f; bool twice_increase_max_reg_pressure = occupancy_by_logic_size >= 595.f * 2.f; @@ -373,6 +385,7 @@ ParamsKey Convolution_kernel_b_fs_zyx_fsv16_imad::GetSupportedKey() const { k.EnableQuantization(QuantizationType::SYMMETRIC); k.EnableQuantization(QuantizationType::ASYMMETRIC_DATA); k.EnableDilation(); + k.EnableDynamicShapesSupport(); return k; } @@ -450,10 +463,15 @@ JitConstants Convolution_kernel_b_fs_zyx_fsv16_imad::GetJitConstants(const convo ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_zyx_fsv16_imad::SetDefault(const convolution_params& params, int) const { + const BlockParams& block_params = GetBlockParams(params); + return CalcDispatchDataWithBlockParams(params, block_params); +} // SetDefault + +ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_zyx_fsv16_imad::CalcDispatchDataWithBlockParams(const convolution_params& params, + const BlockParams& block_params) const { DispatchData dispatchData; const auto& output = params.outputs[0]; const auto& weights = params.weights; - auto block_params = GetBlockParams(params); dispatchData.gws[0] = CeilDiv(output.X().v, block_params.output_block_width); dispatchData.gws[1] = CeilDiv(output.Y().v, block_params.output_block_height) * CeilDiv(output.Z().v, block_params.output_block_depth); @@ -466,17 +484,24 @@ ConvolutionKernelBase::DispatchData Convolution_kernel_b_fs_zyx_fsv16_imad::SetD dispatchData.cldnnStyle 
= {0, 0, 0, 0, 0}; dispatchData.gemmStyle = {0, 0, 0, 0, 0, 0}; - + dispatchData.blockParams = { block_params.output_block_width, block_params.output_block_height, + block_params.output_block_depth, block_params.output_block_features, + block_params.input_block_width, block_params.input_block_height, + block_params.input_block_depth, block_params.feature_slm_split }; return dispatchData; -} // SetDefault +} KernelsPriority Convolution_kernel_b_fs_zyx_fsv16_imad::GetKernelsPriority(const Params& params) const { const auto& p = static_cast(params); - if (static_cast(p.weights.IFM().v) / static_cast(Align(p.weights.IFM().v, fsv)) < 0.5f) + if (!p.is_shape_agnostic) { + if (static_cast(p.weights.IFM().v) / static_cast(Align(p.weights.IFM().v, fsv)) < 0.5f) + return FORCE_PRIORITY_4; + else + return FORCE_PRIORITY_2; + } else { return FORCE_PRIORITY_4; - else - return FORCE_PRIORITY_2; + } } bool Convolution_kernel_b_fs_zyx_fsv16_imad::Validate(const Params& params) const { @@ -507,4 +532,23 @@ bool Convolution_kernel_b_fs_zyx_fsv16_imad::Validate(const Params& params) cons return true; } + +void Convolution_kernel_b_fs_zyx_fsv16_imad::GetUpdateDispatchDataFunc(KernelData& kd) const { + const auto& prim_params = static_cast(*kd.params); + const auto& dynamicDispatchData = SetDefault(prim_params); + + kd.update_dispatch_data_func = [this, dynamicDispatchData](const Params& params, KernelData& kd) { + const auto& prim_params = static_cast(params); + const auto& dispatchData = CalcDispatchDataWithBlockParams(prim_params, dynamicDispatchData.blockParams); + OPENVINO_ASSERT(kd.kernels.size() == 1, "[GPU] Invalid kernels size for update dispatch data func"); + kd.kernels[0].params.workGroups.global = dispatchData.gws; + kd.kernels[0].params.workGroups.local = dispatchData.lws; + kd.kernels[0].skip_execution = KernelData::SkipKernelExecution(prim_params); + + kd.internalBufferSizes.clear(); + kd.internalBufferSizes.push_back(prim_params.inputs[0].PhysicalSizeInBytes()); + kd.internalBufferDataType = prim_params.inputs[0].GetDType(); + }; +} + } // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h index c2da5e66982d11..adb5125873a657 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_b_fs_zyx_fsv16_imad.h @@ -12,6 +12,7 @@ namespace kernel_selector { class Convolution_kernel_b_fs_zyx_fsv16_imad : public ConvolutionKernelBase { public: using Parent = ConvolutionKernelBase; + using BlockParams = DispatchData::BlockParams; Convolution_kernel_b_fs_zyx_fsv16_imad() : ConvolutionKernelBase("convolution_gpu_b_fs_zyx_fsv16_imad") {} virtual ~Convolution_kernel_b_fs_zyx_fsv16_imad() {} @@ -24,6 +25,7 @@ class Convolution_kernel_b_fs_zyx_fsv16_imad : public ConvolutionKernelBase { bool Validate(const Params& params) const override; JitConstants GetJitConstants(const convolution_params& params, const DispatchData& dispatchData) const override; DispatchData SetDefault(const convolution_params& params, int autoTuneIndex = -1) const override; + void GetUpdateDispatchDataFunc(KernelData& kd) const override; bool NeedPaddedInput() const override { return true; } WeightsLayout GetPreferredWeightsLayout(const convolution_params& p) const override { return p.groups > 1 ? 
WeightsLayout::g_os_is_zyx_osv16_isv16 : WeightsLayout::os_is_zyx_osv16_isv16; @@ -35,24 +37,11 @@ class Convolution_kernel_b_fs_zyx_fsv16_imad : public ConvolutionKernelBase { FusedOpType::ACTIVATION }; } - struct BlockParams { - size_t output_block_width; - size_t output_block_height; - size_t output_block_depth; - - size_t output_block_features; - - size_t input_block_width; - size_t input_block_height; - size_t input_block_depth; - - size_t feature_slm_split; - }; - BlockParams GetBlockParams(const convolution_params& params) const; float EstimateBlockParamsRatio(const convolution_params& params, const BlockParams& block) const; float EstimateRegPressure(const convolution_params& params, const BlockParams& block) const; float EstimateOccupancy(const convolution_params& params, const BlockParams& block) const; float EstimateSLMUsage(const convolution_params& params, const BlockParams& block) const; + DispatchData CalcDispatchDataWithBlockParams(const convolution_params& params, const BlockParams& block_params) const; }; } // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_base.h index f8d1b3bf0de956..ccdd4941cdd1d0 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_base.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convolution/convolution_kernel_base.h @@ -36,8 +36,23 @@ class ConvolutionKernelBase : public WeightBiasKernelBase { size_t globalWorkSizeDZ; }; + struct BlockParams { + size_t output_block_width; + size_t output_block_height; + size_t output_block_depth; + + size_t output_block_features; + + size_t input_block_width; + size_t input_block_height; + size_t input_block_depth; + + size_t feature_slm_split; + }; + CLDNNStyle cldnnStyle; GEMMStyle gemmStyle; + BlockParams blockParams; }; std::string GetAutoTuneOptions(int autoTuneIndex) const; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 4f9c31064e9026..5d01d448dcfc64 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -11056,3 +11056,141 @@ INSTANTIATE_TEST_SUITE_P(smoke, conv_dyn_test, { ov::Shape{1, 32, 91}, ov::Shape{32, 1, 1, 11}, ov::Strides{1}, ov::Strides{1}, ov::CoordinateDiff{0}, ov::CoordinateDiff{0}, 32 }, { ov::Shape{1, 64, 16, 16}, ov::Shape{64, 1, 1, 3, 3}, ov::Strides{1, 1}, ov::Strides{1, 1}, ov::CoordinateDiff{0, 0}, ov::CoordinateDiff{0, 0}, 64 }, })); + + +struct conv_dyn_3d_test_params { + ov::Shape in_shape; + ov::Shape wei_shape; + ov::Strides stride; + ov::Strides dilation; + ov::CoordinateDiff pad_begin; + ov::CoordinateDiff pad_end; + uint32_t groups; + bool is_caching_test; +}; + +class conv_dyn_3d_test : public testing::TestWithParam {}; + +TEST_P(conv_dyn_3d_test, convolution_gpu_b_fs_zyx_fsv16_imad_quantized) { + auto& engine = get_test_engine(); + auto p = GetParam(); + + auto calculate_ref = [&](memory::ptr input, memory::ptr weights, + memory::ptr a_zp, memory::ptr compensation, ExecutionConfig config) { + auto in_layout = input->get_layout(); + + topology topology_ref( + input_layout("input", in_layout), + data("weights", weights), + data("a_zp", a_zp), + data("compensation", compensation), + convolution("conv", input_info("input"), "weights", no_bias, "", "a_zp", 
"compensation", + p.groups, p.stride, p.dilation, p.pad_begin, p.pad_end, false, data_types::f32)); + + network network_ref(engine, topology_ref, config); + network_ref.set_input_data("input", input); + + auto outputs_ref = network_ref.execute(); + + return outputs_ref.at("conv").get_memory(); + }; + + ov::PartialShape dyn_input_pshape; + for (size_t i = 0; i < p.in_shape.size(); ++i) { + dyn_input_pshape.emplace_back(ov::Dimension()); + } + dyn_input_pshape[1] = p.in_shape[1]; + + auto in_layout = layout{dyn_input_pshape, data_types::u8, format::b_fs_zyx_fsv16}; + auto input = engine.allocate_memory({ p.in_shape, data_types::u8, format::b_fs_zyx_fsv16 }); + auto weights = engine.allocate_memory({p.wei_shape, data_types::i8, format::bfzyx}); + + auto a_zp_shape = ov::Shape(p.in_shape.size(), 1); + a_zp_shape[1] = p.in_shape[1]; + auto a_zp = engine.allocate_memory({ a_zp_shape, data_types::u8, format::bfyx }); + + auto compensation = engine.allocate_memory({ a_zp_shape, data_types::f32, format::bfyx }); + + tests::random_generator rg(GET_SUITE_NAME); + VF input_rnd = rg.generate_random_1d(ov::shape_size(p.in_shape), 0, 10); + VF weights_rnd = rg.generate_random_1d(ov::shape_size(p.wei_shape), -5, 5); + VF a_zp_rnd = rg.generate_random_1d(ov::shape_size(a_zp_shape), 1, 5); + VF compensation_rnd = rg.generate_random_1d(ov::shape_size(a_zp_shape), -5, 5); + + set_values(input, input_rnd); + set_values(weights, weights_rnd); + set_values(a_zp, a_zp_rnd); + set_values(compensation, compensation_rnd); + + topology topology( + input_layout("input", in_layout), + data("weights", weights), + data("a_zp", a_zp), + data("compensation", compensation), + convolution("conv", input_info("input"), "weights", no_bias, "", "a_zp", "compensation", + p.groups, p.stride, p.dilation, p.pad_begin, p.pad_end, false, data_types::f32)); + + ExecutionConfig config = get_test_default_config(engine); + ov::intel_gpu::ImplementationDesc conv_impl = { format::b_fs_zyx_fsv16, "convolution_gpu_b_fs_zyx_fsv16_imad", impl_types::ocl }; + config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ { "conv", conv_impl } })); + config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); + config.set_property(ov::enable_profiling(true)); + + cldnn::network::ptr network = get_network(engine, topology, config, get_test_stream_ptr(), p.is_caching_test); + network->set_input_data("input", input); + + auto inst = network->get_primitive("conv"); + auto impl = inst->get_impl(); + ASSERT_TRUE(impl != nullptr); + ASSERT_TRUE(impl->is_dynamic()); + + auto outputs = network->execute(); + + auto output_memory = outputs.at("conv").get_memory(); + + auto output_memory_ref = calculate_ref(input, weights, a_zp, compensation, config); + + cldnn::mem_lock output_ptr(output_memory, get_test_stream()); + cldnn::mem_lock output_ptr_ref(output_memory_ref, get_test_stream()); + + ASSERT_EQ(outputs.at("conv").get_layout(), output_memory_ref->get_layout()); + for (size_t i = 0; i < output_ptr.size(); i++) { + ASSERT_EQ(output_ptr[i], output_ptr_ref[i]); + } + + { + // Change original shape for the second run + auto new_shape = p.in_shape; + new_shape[2] += 4; + + auto input = engine.allocate_memory({ new_shape, data_types::u8, format::b_fs_zyx_fsv16 }); + + VF input_rnd = rg.generate_random_1d(ov::shape_size(p.in_shape), 0, 10); + set_values(input, input_rnd); + + network->set_input_data("input", input); + auto outputs = network->execute(); + + auto output_memory = outputs.at("conv").get_memory(); + auto 
output_memory_ref = calculate_ref(input, weights, a_zp, compensation, config); + + cldnn::mem_lock output_ptr(output_memory, get_test_stream()); + cldnn::mem_lock output_ptr_ref(output_memory_ref, get_test_stream()); + + ASSERT_EQ(outputs.at("conv").get_layout(), output_memory_ref->get_layout()); + for (size_t i = 0; i < output_ptr.size(); i++) { + ASSERT_EQ(output_ptr[i], output_ptr_ref[i]); + } + } +} + +INSTANTIATE_TEST_SUITE_P(smoke, conv_dyn_3d_test, + testing::ValuesIn(std::vector{ + { ov::Shape{1, 16, 5, 5, 5}, ov::Shape{2, 16, 3, 3, 3}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 1, false}, + { ov::Shape{1, 16, 5, 5, 5}, ov::Shape{2, 16, 3, 3, 3}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 1, true}, + { ov::Shape{2, 32, 30, 30, 30}, ov::Shape{16, 32, 10, 10, 10}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 1, false }, + { ov::Shape{2, 32, 30, 30, 30}, ov::Shape{16, 32, 10, 10, 10}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 1, true }, + { ov::Shape{1, 16, 5, 5, 5}, ov::Shape{2, 16, 3, 3, 3}, ov::Strides{2, 2, 2}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 1, false }, + { ov::Shape{1, 16, 5, 5, 5}, ov::Shape{2, 16, 3, 3, 3}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{1, 1, 1}, ov::CoordinateDiff{1, 1, 1}, 1, false }, + { ov::Shape{1, 16, 5, 5, 5}, ov::Shape{16, 1, 1, 3, 3, 3}, ov::Strides{1, 1, 1}, ov::Strides{1, 1, 1}, ov::CoordinateDiff{0, 0, 0}, ov::CoordinateDiff{0, 0, 0}, 16, false } +})); From 05c365c5e4fdc78a5d7137963fad233c36da972e Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Wed, 6 Nov 2024 12:00:55 +0100 Subject: [PATCH 007/182] [PyOV] Migrate benchmark_app from `setup.py` to `pyproject.toml` (#27305) ### Details: - Added `--no-deps` because `pip wheel .` with `pyproject.toml` would save the `numpy` wheel in cwd - Since requirements are added to the benchmark_app wheel itself, when installing it they'll be pulled from PyPI anyway ### Tickets: - CVS-156008 --------- Signed-off-by: p-wysocki Co-authored-by: Anastasia Kuporosova Co-authored-by: Michal Lukaszewski --- tools/benchmark_tool/pyproject.toml | 31 +++++++++++++++++++ tools/benchmark_tool/requirements.txt | 1 - tools/benchmark_tool/setup.py | 43 --------------------------- 3 files changed, 31 insertions(+), 44 deletions(-) create mode 100644 tools/benchmark_tool/pyproject.toml delete mode 100644 tools/benchmark_tool/requirements.txt delete mode 100644 tools/benchmark_tool/setup.py diff --git a/tools/benchmark_tool/pyproject.toml b/tools/benchmark_tool/pyproject.toml new file mode 100644 index 00000000000000..61fe23ffbbf539 --- /dev/null +++ b/tools/benchmark_tool/pyproject.toml @@ -0,0 +1,31 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Use this configuration file to create a wheel with OpenVINO™ Python* benchmark tool: +# $ python -m pip wheel . 
--no-deps + +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "benchmark_tool" +version = "0.0.0" +description = "OpenVINO™ Python* tools package" +readme = "README.md" +authors = [ + {name = "Intel® Corporation", email = "openvino_pushbot@intel.com"} +] +urls = {homepage = "https://github.com/openvinotoolkit/openvino"} +classifiers = [ + "Programming Language :: Python :: 3", + "Operating System :: OS Independent", + "License :: OSI Approved :: Apache Software License" +] +requires-python = ">=3.9" +dependencies = [ + "numpy>=1.16.6,<2.2.0", +] + +[project.scripts] +benchmark_app = "openvino.tools.benchmark.main:main" diff --git a/tools/benchmark_tool/requirements.txt b/tools/benchmark_tool/requirements.txt deleted file mode 100644 index 5594ff31cd35c5..00000000000000 --- a/tools/benchmark_tool/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -numpy>=1.16.6,<2.1.0 diff --git a/tools/benchmark_tool/setup.py b/tools/benchmark_tool/setup.py deleted file mode 100644 index 98f2a369a60360..00000000000000 --- a/tools/benchmark_tool/setup.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Use this script to create a wheel with OpenVINO™ Python* tools: - -$ python -m pip wheel . -""" -from setuptools import setup, find_packages - - -with open('requirements.txt', 'r', encoding='utf-8') as f: - raw_contents = f.readlines() - reqs = [line.strip() for line in raw_contents] - -with open('README.md', 'r', encoding='utf-8') as f: - long_description = f.read() - -setup( - name='benchmark_tool', - version='0.0.0', - author='Intel® Corporation', - license='OSI Approved :: Apache Software License', - author_email='openvino_pushbot@intel.com', - url='https://github.com/openvinotoolkit/openvino', - description='OpenVINO™ Python* tools package', - long_description=long_description, - long_description_content_type='text/markdown', - entry_points={ - 'console_scripts': [ - 'benchmark_app = openvino.tools.benchmark.main:main'], - }, - classifiers=[ - 'Programming Language :: Python :: 3', - 'Operating System :: OS Independent', - ], - packages=find_packages(), - install_requires=reqs, - data_files=[('.', ['requirements.txt'])], - python_requires='>=3.9', -) From eabe5282511563d3fc4325f25234e338b0d8a28a Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Wed, 6 Nov 2024 14:17:17 +0200 Subject: [PATCH 008/182] [NPU] Fix compiler clang build (#27405) ### Details: - *Change from define to constexpr and remove unused code* - *Fix clang build* ### Tickets: - *EISW-146067* --------- Signed-off-by: Bogdan Pereanu --- .../intel_npu/src/al/include/intel_npu/prefix.hpp | 10 +++------- .../src/compiler_adapter/src/ze_graph_ext_wrappers.cpp | 1 - .../utils/include/intel_npu/utils/zero/zero_types.hpp | 4 ++-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/plugins/intel_npu/src/al/include/intel_npu/prefix.hpp b/src/plugins/intel_npu/src/al/include/intel_npu/prefix.hpp index 9be19d67454d82..f80390ef61ca7e 100644 --- a/src/plugins/intel_npu/src/al/include/intel_npu/prefix.hpp +++ b/src/plugins/intel_npu/src/al/include/intel_npu/prefix.hpp @@ -11,9 +11,9 @@ namespace intel_npu { // // Prefix for ReadValue and Assign operations in compiler. 
// -#define READVALUE_PREFIX std::string("vpux_ie_read_value_") -#define ASSIGN_PREFIX std::string("vpux_ie_assign_") -#define SHAPE_TENSOR_PREFIX std::string("vpux_ie_shape_") +constexpr std::string_view READVALUE_PREFIX = "vpux_ie_read_value_"; +constexpr std::string_view ASSIGN_PREFIX = "vpux_ie_assign_"; +constexpr std::string_view SHAPE_TENSOR_PREFIX = "vpux_ie_shape_"; inline bool isStateInputName(const std::string& name) { return !name.compare(0, READVALUE_PREFIX.length(), READVALUE_PREFIX); @@ -25,8 +25,4 @@ inline bool isShapeTensorName(const std::string& name) { return !name.compare(0, SHAPE_TENSOR_PREFIX.length(), SHAPE_TENSOR_PREFIX); } -inline std::string stateOutputToStateInputName(const std::string& name) { - return READVALUE_PREFIX + name.substr(ASSIGN_PREFIX.length()); -} - } // namespace intel_npu diff --git a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp index c094838cc22ef0..095f470e15e02f 100644 --- a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp +++ b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp @@ -396,7 +396,6 @@ ze_graph_handle_t ZeGraphExtWrappers::getGraphHandle( const uint32_t& flags) const { ze_graph_handle_t graphHandle; - _logger.info("compileIR Using extension version: %s", typeid(TableExtension).name()); createGraph(std::move(serializedIR), buildFlags, flags, &graphHandle); return graphHandle; diff --git a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_types.hpp b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_types.hpp index 032dd867306004..188da886f5d9e4 100644 --- a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_types.hpp +++ b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_types.hpp @@ -70,7 +70,7 @@ struct ze_graph_dditable_ext_decorator final { } ~ze_graph_dditable_ext_decorator() = default; - inline const uint32_t version() const { + inline uint32_t version() const { return _driverExtVersion; } @@ -202,7 +202,7 @@ struct ze_command_queue_npu_dditable_ext_decorator final { _commandQueueExtVersion(commandQueueExtVersion) {} ~ze_command_queue_npu_dditable_ext_decorator() = default; - inline const uint32_t version() const { + inline uint32_t version() const { return _commandQueueExtVersion; } From 6c715998b36012c26d59e73fe86fca00fee761d3 Mon Sep 17 00:00:00 2001 From: Przemyslaw Wysocki Date: Wed, 6 Nov 2024 13:48:23 +0100 Subject: [PATCH 009/182] [PyOV] Use `python -m build` only for old `pip` versions (#27429) ### Details: - An alternative to https://github.com/openvinotoolkit/openvino/pull/27426 - The functionality branching based on pip version has recently been removed in https://github.com/openvinotoolkit/openvino/pull/27190 - This fix has originated from discussion at https://github.com/openvinotoolkit/openvino/pull/27190/files#r1830707810 - Fix for issues with NPU building on Windows machines - A disadvantage is that we need to have `build` as a dependency no matter what, there's no environment marker for `pip` version which would allow us to install it conditionally ### Tickets: - EISW-146038 Signed-off-by: p-wysocki --- src/bindings/python/wheel/CMakeLists.txt | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/bindings/python/wheel/CMakeLists.txt b/src/bindings/python/wheel/CMakeLists.txt index 72828a155dd865..d943c3caa21e52 100644 --- 
a/src/bindings/python/wheel/CMakeLists.txt +++ b/src/bindings/python/wheel/CMakeLists.txt @@ -100,14 +100,31 @@ set(openvino_wheel_path "${openvino_wheels_output_dir}/${openvino_wheel_name}") # create target for openvino.wheel # -# for --config-setting explanation see https://github.com/pypa/setuptools/issues/2491 -set(wheel_build_command - ${Python3_EXECUTABLE} -m build "${CMAKE_CURRENT_SOURCE_DIR}" +execute_process(COMMAND ${Python3_EXECUTABLE} -m pip --version + OUTPUT_VARIABLE pip_version OUTPUT_STRIP_TRAILING_WHITESPACE) + +string(REGEX MATCH "pip[ ]+([\\.0-9]*)" pip_version "${pip_version}") +set(pip_version ${CMAKE_MATCH_1}) + +if(pip_version VERSION_GREATER_EQUAL 22.0) + set(wheel_build_command + ${Python3_EXECUTABLE} -m pip wheel + --no-deps + --wheel-dir ${openvino_wheels_output_dir} + --verbose + --build-option --build-number=${WHEEL_BUILD} + --build-option --plat-name=${PLATFORM_TAG} + "${CMAKE_CURRENT_SOURCE_DIR}") +else() + # for --config-setting explanation see https://github.com/pypa/setuptools/issues/2491 + set(wheel_build_command + ${Python3_EXECUTABLE} -m build "${CMAKE_CURRENT_SOURCE_DIR}" --outdir ${openvino_wheels_output_dir} --config-setting=--build-option=--build-number=${WHEEL_BUILD} --config-setting=--build-option=--plat-name=${PLATFORM_TAG} --config-setting=--quiet --wheel) +endif() add_custom_command(OUTPUT ${openvino_wheel_path} COMMAND ${setup_py_env} ${wheel_build_command} From 95d6be201f9cbb2e41c5d58d05f6638e84c71606 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Wed, 6 Nov 2024 15:01:33 +0100 Subject: [PATCH 010/182] [GHA] WA parallel make issue (#27422) ### Details: - To address the issue mentioned [here](https://unix.stackexchange.com/questions/316644/is-make-j-with-no-argument-dangerous), we need to set the number of parallel jobs explicitly ### Tickets: - *156361* --- .github/workflows/android_arm64.yml | 2 +- .github/workflows/android_x64.yml | 4 ++-- .github/workflows/coverity.yml | 2 +- .github/workflows/dev_cpu_linux_snippets_libxsmm.yml | 2 +- .github/workflows/job_build_linux.yml | 10 +++++----- .github/workflows/job_build_windows.yml | 4 ++-- .github/workflows/job_onnx_runtime.yml | 2 +- .github/workflows/linux_conditional_compilation.yml | 4 ++-- .github/workflows/linux_riscv.yml | 4 ++-- .github/workflows/mac.yml | 6 +++--- .github/workflows/mac_arm64.yml | 6 +++--- .github/workflows/ubuntu_22.yml | 2 +- .github/workflows/windows_conditional_compilation.yml | 4 ++-- 13 files changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml index fca16f2848f7bb..e0954871f4b51e 100644 --- a/.github/workflows/android_arm64.yml +++ b/.github/workflows/android_arm64.yml @@ -169,7 +169,7 @@ jobs: run: ${SCCACHE_PATH} --zero-stats - name: Cmake - build - run: cmake --build ${BUILD_DIR} --parallel + run: cmake --build ${BUILD_DIR} --parallel $(nproc) - name: Show ccache stats run: ${SCCACHE_PATH} --show-stats diff --git a/.github/workflows/android_x64.yml b/.github/workflows/android_x64.yml index 47ea6d2cd90ffe..b0b46c662abdbb 100644 --- a/.github/workflows/android_x64.yml +++ b/.github/workflows/android_x64.yml @@ -73,7 +73,7 @@ jobs: defaults: run: shell: bash - runs-on: aks-linux-16-cores-64gb + runs-on: aks-linux-16-cores-32gb container: image: ${{ fromJSON(needs.docker.outputs.images).ov_build.ubuntu_22_04_android }} volumes: @@ -144,7 +144,7 @@ jobs: run: ${SCCACHE_PATH} --zero-stats - name: Cmake - build - run: cmake --build ${BUILD_DIR} --parallel + run: cmake --build 
${BUILD_DIR} --parallel $(nproc) - name: Show ccache stats run: ${SCCACHE_PATH} --show-stats diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 9797414cde56c8..5e3aa3f9bf0fc2 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -164,7 +164,7 @@ jobs: popd - name: Cmake build - OpenVINO with Coverity - run: ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + run: ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int cmake --build ${BUILD_DIR} --parallel $ENV:NUMBER_OF_PROCESSORS --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show sccache stats run: ${SCCACHE_PATH} --show-stats diff --git a/.github/workflows/dev_cpu_linux_snippets_libxsmm.yml b/.github/workflows/dev_cpu_linux_snippets_libxsmm.yml index 2f6d646f8e271d..ba458da5d3ec1a 100644 --- a/.github/workflows/dev_cpu_linux_snippets_libxsmm.yml +++ b/.github/workflows/dev_cpu_linux_snippets_libxsmm.yml @@ -143,7 +143,7 @@ jobs: run: ${SCCACHE_PATH} --zero-stats - name: Cmake build - OpenVINO - run: cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + run: cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show sccache stats run: ${SCCACHE_PATH} --show-stats diff --git a/.github/workflows/job_build_linux.yml b/.github/workflows/job_build_linux.yml index e491388a4b3606..3964f049be2abb 100644 --- a/.github/workflows/job_build_linux.yml +++ b/.github/workflows/job_build_linux.yml @@ -168,7 +168,7 @@ jobs: run: ${SCCACHE_PATH} --zero-stats - name: Cmake build - OpenVINO - run: cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + run: cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show sccache stats run: ${SCCACHE_PATH} --show-stats @@ -210,7 +210,7 @@ jobs: -DPython3_EXECUTABLE=$python_exec \ -DCPACK_GENERATOR=DEB \ ${BUILD_DIR} - cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target package + cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} --target package - name: Cmake & Build - OpenVINO Contrib if: ${{ inputs.build-contrib }} @@ -221,7 +221,7 @@ jobs: -DENABLE_WHEEL=OFF \ -S ${OPENVINO_REPO} \ -B ${BUILD_DIR} - cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: CMake configure, build and install - OpenVINO JS API if: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} @@ -230,7 +230,7 @@ jobs: -DCPACK_GENERATOR=NPM \ -DENABLE_SYSTEM_TBB=OFF \ -DENABLE_WHEEL=OFF - cmake --build ${BUILD_DIR} --parallel + cmake --build ${BUILD_DIR} --parallel $(nproc) cmake --install ${BUILD_DIR} --prefix ${INSTALL_DIR_JS} - name: Build RPM packages @@ -243,7 +243,7 @@ jobs: -DENABLE_WHEEL=OFF \ -DENABLE_TESTS=OFF \ ${BUILD_DIR} - cmake --build ${BUILD_DIR} --parallel --target package --verbose + cmake --build ${BUILD_DIR} --parallel $(nproc) --target package --verbose # # Upload build artifacts and logs diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index 0567109e0dea46..4ea82590e7abd5 100644 --- a/.github/workflows/job_build_windows.yml +++ b/.github/workflows/job_build_windows.yml @@ -164,7 +164,7 @@ jobs: run: '& ccache --zero-stats' - name: Cmake build - OpenVINO - run: cmake --build ${{ 
env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --parallel --verbose + run: cmake --build ${{ env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --parallel $ENV:NUMBER_OF_PROCESSORS --verbose - name: Show ccache stats run: '& ccache --show-stats' @@ -201,7 +201,7 @@ jobs: -DCPACK_GENERATOR=NPM ` -DENABLE_SYSTEM_TBB=OFF ` -DENABLE_WHEEL=OFF - cmake --build ${{ env.BUILD_DIR }} --parallel + cmake --build ${{ env.BUILD_DIR }} --parallel $ENV:NUMBER_OF_PROCESSORS cmake --install ${{ env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_DIR_JS }} # diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml index 966d258a2fc609..df50c4f3e2ad3c 100644 --- a/.github/workflows/job_onnx_runtime.yml +++ b/.github/workflows/job_onnx_runtime.yml @@ -96,7 +96,7 @@ jobs: --config RelWithDebInfo \ --use_openvino CPU \ --build_shared_lib \ - --parallel \ + --parallel $(nproc) \ --skip_tests \ --compile_no_warning_as_error \ --allow_running_as_root \ diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml index acb7e1271d7a34..27f54da6ecdc60 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -169,7 +169,7 @@ jobs: - name: Cmake build - CC COLLECT run: | cmake --build ${BUILD_DIR} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} - cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib + cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib - name: Show sccache stats run: ${SCCACHE_PATH} --show-stats @@ -182,7 +182,7 @@ jobs: - name: Build C++ samples - OpenVINO build tree run: | cmake -G "${{ env.CMAKE_GENERATOR }}" -DOpenVINO_DIR=${BUILD_DIR} -S ${INSTALL_DIR}/samples/cpp -B ${BUILD_DIR}/cpp_samples - cmake --build ${BUILD_DIR}/cpp_samples --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target hello_query_device + cmake --build ${BUILD_DIR}/cpp_samples --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} --target hello_query_device - name: Build C samples - OpenVINO install tree run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index c450a5d30768e4..85b0db8c36294e 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -213,13 +213,13 @@ jobs: source ${OPENVINO_BUILD_DIR}/dependencies/deactivate_conanbuild.sh - name: Cmake - Build - run: cmake --build ${OPENVINO_BUILD_DIR} --parallel + run: cmake --build ${OPENVINO_BUILD_DIR} --parallel $(nproc) - name: Show ccache stats run: ccache --show-stats - name: Cmake - Install - run: cmake --build ${OPENVINO_BUILD_DIR} --parallel --target install + run: cmake --build ${OPENVINO_BUILD_DIR} --parallel $(nproc) --target install - name: Build OpenVINO C++ samples run: | diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml index 5492ad40aa17b4..c587c5ad7323b3 100644 --- a/.github/workflows/mac.yml +++ b/.github/workflows/mac.yml @@ -159,7 +159,7 @@ jobs: -B ${{ env.BUILD_DIR }} - name: Cmake build - OpenVINO - run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + run: cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show ccache stats run: ccache --show-stats @@ -186,7 +186,7 @@ jobs: -DOPENVINO_EXTRA_MODULES=${{ 
env.OPENVINO_CONTRIB_REPO }}/modules/custom_operations \ -S ${{ env.OPENVINO_REPO }} \ -B ${{ env.BUILD_DIR }} - cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: CMake configure, build and install - OpenVINO JS API if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API @@ -196,7 +196,7 @@ jobs: -S ${{ env.OPENVINO_REPO }} \ -B ${{ env.BUILD_DIR }} - cmake --build ${{ env.BUILD_DIR }} --parallel + cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR_JS }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake # diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml index 8100b74734ab17..0708a844fe6b8b 100644 --- a/.github/workflows/mac_arm64.yml +++ b/.github/workflows/mac_arm64.yml @@ -159,7 +159,7 @@ jobs: -B ${{ env.BUILD_DIR }} - name: Cmake build - OpenVINO - run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + run: cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show ccache stats run: ccache --show-stats @@ -187,7 +187,7 @@ jobs: -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules/custom_operations \ -S ${{ env.OPENVINO_REPO }} \ -B ${{ env.BUILD_DIR }} - cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: CMake configure, build and install - OpenVINO JS API if: fromJSON(needs.smart_ci.outputs.affected_components).JS_API @@ -197,7 +197,7 @@ jobs: -S ${{ env.OPENVINO_REPO }} \ -B ${{ env.BUILD_DIR }} - cmake --build ${{ env.BUILD_DIR }} --parallel + cmake --build ${{ env.BUILD_DIR }} --parallel $(nproc) cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR_JS }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake # diff --git a/.github/workflows/ubuntu_22.yml b/.github/workflows/ubuntu_22.yml index 92178fce7f5054..f4caec8b2458a0 100644 --- a/.github/workflows/ubuntu_22.yml +++ b/.github/workflows/ubuntu_22.yml @@ -477,7 +477,7 @@ jobs: -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ -S ${OPENVINO_CONTRIB_REPO}/modules/nvidia_plugin \ -B ${NVIDIA_BUILD_DIR} - cmake --build ${NVIDIA_BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests + cmake --build ${NVIDIA_BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} --verbose -- ov_nvidia_func_tests ov_nvidia_unit_tests - name: Show ccache stats run: ${SCCACHE_PATH} --show-stats diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index d93c737606a2b4..6a5846b514dbd7 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -193,7 +193,7 @@ jobs: - name: Build C++ samples - OpenVINO build tree run: | cmake -G "${{ env.CMAKE_GENERATOR }}" -DOpenVINO_DIR=${{ env.BUILD_DIR }} -S ${{ env.INSTALL_DIR }}/samples/cpp -B ${{ env.BUILD_DIR }}/cpp_samples - cmake --build ${{ env.BUILD_DIR }}/cpp_samples --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target hello_query_device + cmake --build ${{ env.BUILD_DIR }}/cpp_samples --parallel $ENV:NUMBER_OF_PROCESSORS --config ${{ env.CMAKE_BUILD_TYPE }} --target hello_query_device - name: Build C samples - OpenVINO install tree run: | @@ -331,7 +331,7 @@ jobs: -B ${{ 
env.BUILD_DIR }} - name: Cmake build - CC ON - run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target benchmark_app --verbose + run: cmake --build ${{ env.BUILD_DIR }} --parallel $ENV:NUMBER_OF_PROCESSORS --config ${{ env.CMAKE_BUILD_TYPE }} --target benchmark_app --verbose - name: List bin files shell: cmd From 42d4377a97e025d34c8f38b9504b85151b5473e9 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 6 Nov 2024 19:47:51 +0400 Subject: [PATCH 011/182] [GPU] Fix accuracy issue in PagedAttention kernel for large prompts (#27433) ### Details: - Fix accuracy issue in PagedAttention kernel for large prompts (4K/8K tokens) by correcting index calculation in sub_group_broadcast function to ensure accurate data broadcasting within the subgroup ### Tickets: - [CVS-154881](https://jira.devtools.intel.com/browse/CVS-154881) --- .../intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl index a3bdd7e12dcd49..00c43829d02ea7 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl @@ -436,7 +436,7 @@ KERNEL(pa_sdpa_finalization_stage)( partition_num * HEAD_SIZE + head_size_idx; OUTPUT_TYPE out_val = tmp_out[tmp_out_offset]; - acc += TO_SOFTMAX_ACCUMULATOR_TYPE(out_val) * TO_SOFTMAX_ACCUMULATOR_TYPE(sub_group_broadcast(exp_sum[partition_num / SUBGROUP_SIZE], partition_num)) / TO_SOFTMAX_ACCUMULATOR_TYPE(global_sum); + acc += TO_SOFTMAX_ACCUMULATOR_TYPE(out_val) * TO_SOFTMAX_ACCUMULATOR_TYPE(sub_group_broadcast(exp_sum[partition_num / SUBGROUP_SIZE], partition_num % SUBGROUP_SIZE)) / TO_SOFTMAX_ACCUMULATOR_TYPE(global_sum); } const uint out_offset = seq_idx * (HEADS_NUM * HEAD_SIZE) + head_num_idx * HEAD_SIZE + From 33cba008c50ddac49efc12b15cb933f4feeacba5 Mon Sep 17 00:00:00 2001 From: Alexey Smirnov Date: Wed, 6 Nov 2024 17:24:18 +0000 Subject: [PATCH 012/182] [NPUW] Support i16/u16 constants during partitioning (#27437) --- .../intel_npu/src/plugin/npuw/partitioning/partitioning.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp index 99705fef30e8a8..dbc86c5062da9e 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/partitioning.cpp @@ -1287,6 +1287,8 @@ void Partitioner::saveRepeatedConstants(const std::string& func_name) { HANDLE_CASE(boolean, bool); HANDLE_CASE(i4, int8_t); HANDLE_CASE(u4, uint8_t); + HANDLE_CASE(i16, int16_t); + HANDLE_CASE(u16, uint16_t); HANDLE_CASE(i32, int); HANDLE_CASE(i64, int64_t); HANDLE_CASE(f16, uint16_t); From 9226bbe7dc108af10bf40b022f09b605ada081bb Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Thu, 7 Nov 2024 09:42:19 +0100 Subject: [PATCH 013/182] [GHA] Coverity build regression fix (#27445) ### Details: - Fixed regression introduced by https://github.com/openvinotoolkit/openvino/pull/27422 ### Tickets: - *ticket-id* --- .github/workflows/coverity.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index 5e3aa3f9bf0fc2..e26bd66e097e33 100644 --- a/.github/workflows/coverity.yml +++ 
b/.github/workflows/coverity.yml @@ -164,7 +164,7 @@ jobs: popd - name: Cmake build - OpenVINO with Coverity - run: ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int cmake --build ${BUILD_DIR} --parallel $ENV:NUMBER_OF_PROCESSORS --config ${{ env.CMAKE_BUILD_TYPE }} + run: ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - name: Show sccache stats run: ${SCCACHE_PATH} --show-stats From 5fad8053be6aeb954844aef8511ac0610ba96227 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Thu, 7 Nov 2024 14:30:59 +0400 Subject: [PATCH 014/182] [GPU] Enable KV-cache compression by default for non-systolic platforms (#27410) ### Details: - Enable KV-cache compression by default for non-systolic platforms --- .../cl_kernels/dynamic_quantize_gpu_kv_cache.cl | 6 +++--- .../dynamic_quantize_kernel_opt_kv_cache.cpp | 3 +++ src/plugins/intel_gpu/src/runtime/execution_config.cpp | 5 +++++ .../functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp | 5 ++++- .../tests/unit/test_cases/dynamic_quantize_gpu_test.cpp | 7 +++---- 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl index 22a2f03c94564a..b0e363169e9e4d 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl @@ -83,11 +83,11 @@ KERNEL(dynamic_quantize_gpu_kv_cache)( #if ASYMMETRIC_QUANTIZATION min_value = work_group_reduce_min(min_value); max_value = work_group_reduce_max(max_value); - OUTPUT1_TYPE scale = (OUTPUT1_TYPE)((CHAR_MAX - CHAR_MIN) / (max_value - min_value)); - OUTPUT1_TYPE zp = (OUTPUT1_TYPE)(-min_value * scale) - CHAR_MAX; + ACCUMULATOR_TYPE scale = (ACCUMULATOR_TYPE)((CHAR_MAX - CHAR_MIN) / (max_value - min_value)); + ACCUMULATOR_TYPE zp = (ACCUMULATOR_TYPE)(-min_value * scale) - CHAR_MAX; #else max_value = work_group_reduce_max(max_value); - OUTPUT1_TYPE scale = 127.0h / max_value; + ACCUMULATOR_TYPE scale = 127.0h / max_value; #endif #ifdef APPEND_MODE diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/dynamic_quantize/dynamic_quantize_kernel_opt_kv_cache.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/dynamic_quantize/dynamic_quantize_kernel_opt_kv_cache.cpp index d0c99484e3f52e..8f7537eeeb5d7d 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/dynamic_quantize/dynamic_quantize_kernel_opt_kv_cache.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/dynamic_quantize/dynamic_quantize_kernel_opt_kv_cache.cpp @@ -141,6 +141,9 @@ JitConstants DynamicQuantizeKernelKVCache::GetJitConstants(const dynamic_quantiz jit.AddConstant(MakeJitConstant("ASYMMETRIC_QUANTIZATION", params.use_asymmetric_quantization)); jit.AddConstant(MakeJitConstant("GROUP_SCALES_WITH_ZP", params.combine_scales_and_zp)); + // Use FP32 accumulator type for scale/zp calculation + jit.Merge(MakeTypeJitConstants(Datatype::F32, "ACCUMULATOR")); + bool rearrange_scales_order = false; const auto& scales_output_order = params.scales_output_order; if (!scales_output_order.empty()) { diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 7c8e55cddfe593..44758f73289edb 100644 --- 
a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -246,6 +246,11 @@ void ExecutionConfig::apply_user_properties(const cldnn::device_info& info) { set_property(ov::intel_gpu::queue_type(QueueTypes::in_order)); } + // Enable KV-cache compression by default for non-systolic platforms + if (!is_set_by_user(ov::hint::kv_cache_precision) && !info.supports_immad) { + set_property(ov::hint::kv_cache_precision(ov::element::i8)); + } + user_properties.clear(); } diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp index 16db9d89c28b4d..2563fe535a93d9 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp @@ -47,8 +47,11 @@ class SDPAWithKVCacheTest : public ::testing::Test, public ::testing::WithParamI ov::AnyMap properties = {ov::hint::inference_precision(ov::element::f16), ov::intel_gpu::hint::enable_sdpa_optimization(true)}; - if (p.compressed) + if (p.compressed) { properties.emplace(ov::hint::kv_cache_precision(ov::element::i8)); + } else { + properties.emplace(ov::hint::kv_cache_precision(ov::element::undefined)); + } const size_t n_heads = 16; const size_t n_features = 64; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp index 5a78360eb1f6d8..c0e317ff6ce915 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp @@ -121,11 +121,10 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { auto outputs = network->execute(); auto output_mem = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr (output_mem, get_test_stream()); + cldnn::mem_lock output_ptr (output_mem, get_test_stream()); auto ref_output_mem = get_ref_results(); - cldnn::mem_lock output_ptr_ref (ref_output_mem, get_test_stream()); - + cldnn::mem_lock output_ptr_ref (ref_output_mem, get_test_stream()); size_t count = 0; float max_diff = 0.f; float avg = 0.f; @@ -135,7 +134,7 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { max_diff = abs_diff; avg += abs_diff; count++; - OPENVINO_ASSERT(abs_diff < 1); + ASSERT_LE(abs_diff, 1); } GPU_DEBUG_LOG << "---> count: " << count << ", max_diff:" << max_diff << ", avg_diff: " << (avg/count) << std::endl; } From ab71e5ba54e60684e0e69680fee55af1899d707b Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Thu, 7 Nov 2024 15:06:40 +0400 Subject: [PATCH 015/182] [Snippets] Added dynamic memory sharing support (#27175) ### Details: - *Added memory inplace support for Buffer with different precision (it's supported only when output Buffer has data size not more than input Buffer) - it's needed for BF16 and INT8 MHA* - *Added memory inplace support for dynamic Buffers with the marking pass "MarkInvariantShapePath"* ### Tickets: - *154732, 150148* ### Prerequisites: - [x] https://github.com/openvinotoolkit/openvino/pull/27169 - [x] https://github.com/openvinotoolkit/openvino/pull/27300 --- .../lowered/expressions/buffer_expression.hpp | 1 + .../include/snippets/lowered/loop_info.hpp | 17 +- .../lowered/pass/define_buffer_clusters.hpp | 64 ++-- .../pass/mark_invariant_shape_path.hpp | 65 ++++ 
.../lowered/pass/set_buffer_reg_group.hpp | 55 ++- .../snippets/include/snippets/utils/utils.hpp | 6 + .../snippets/src/lowered/expression.cpp | 9 +- .../lowered/expressions/buffer_expression.cpp | 9 +- src/common/snippets/src/lowered/loop_info.cpp | 27 +- .../src/lowered/pass/allocate_buffers.cpp | 3 +- .../lowered/pass/define_buffer_clusters.cpp | 341 ++++++++---------- .../pass/mark_invariant_shape_path.cpp | 128 +++++++ .../src/lowered/pass/set_buffer_reg_group.cpp | 104 +++--- .../snippets/src/pass/mha_tokenization.cpp | 11 +- .../x64/pass/lowered/set_tpp_leading_dim.cpp | 11 +- .../x64/lowered/buffer_allocation.cpp | 94 +++-- 16 files changed, 571 insertions(+), 374 deletions(-) create mode 100644 src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp create mode 100644 src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp diff --git a/src/common/snippets/include/snippets/lowered/expressions/buffer_expression.hpp b/src/common/snippets/include/snippets/lowered/expressions/buffer_expression.hpp index 3dcd98ef0a95fd..9174d9866db503 100644 --- a/src/common/snippets/include/snippets/lowered/expressions/buffer_expression.hpp +++ b/src/common/snippets/include/snippets/lowered/expressions/buffer_expression.hpp @@ -38,6 +38,7 @@ class BufferExpression : public Expression { size_t get_offset() const { return m_offset; } size_t get_allocation_size() const { return m_allocation_size; } size_t get_byte_size() const; + ov::element::Type get_data_type() const; void set_reg_group(size_t reg_group) { m_reg_group = reg_group; } void set_cluster_id(size_t cluster) { m_cluster_id = cluster; } diff --git a/src/common/snippets/include/snippets/lowered/loop_info.hpp b/src/common/snippets/include/snippets/lowered/loop_info.hpp index cc66f5f6ffcc95..23e1f14a8b7f5e 100644 --- a/src/common/snippets/include/snippets/lowered/loop_info.hpp +++ b/src/common/snippets/include/snippets/lowered/loop_info.hpp @@ -211,13 +211,20 @@ class UnifiedLoopInfo : public LoopInfo { int64_t data_size = 0; bool is_dynamic() const; + bool is_static() const; + + friend bool operator==(const LoopPortDesc& lhs, const LoopPortDesc& rhs); + friend bool operator!=(const LoopPortDesc& lhs, const LoopPortDesc& rhs); }; // The structure describes full information about port // - TODO [140365] : UnifiedLoopInfo should have the map of LoopPorts and LoopDesc as class field // instead of the separate vectors with descriptors. struct LoopPortInfo { - LoopPort port; - LoopPortDesc desc; + LoopPortInfo() = default; + LoopPortInfo(LoopPort port_, LoopPortDesc desc_) : port(std::move(port_)), desc(std::move(desc_)) {} + + LoopPort port = {}; + LoopPortDesc desc = {}; }; UnifiedLoopInfo() = default; @@ -367,6 +374,12 @@ class UnifiedLoopInfo : public LoopInfo { caller(m_output_ports[i], m_output_port_descs[i]); } + /** + * @brief Return loop port info of an expression port + * @param expr_port - expression port. 
+ */ + LoopPortInfo get_loop_port_info(const ExpressionPort& expr_port); + protected: /** * @brief Clone LoopPortDesc[actual_port_idx] `new_count` times and insert on the place of current desc diff --git a/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp b/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp index 1597eaa2377a50..312abb02abf7b5 100644 --- a/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp @@ -6,6 +6,8 @@ #include "pass.hpp" +#include "snippets/lowered/loop_info.hpp" + namespace ov { namespace snippets { namespace lowered { @@ -45,7 +47,7 @@ class DefineBufferClusters : public RangedPass { private: using BufferCluster = std::set; using BufferClusters = std::vector; - using BufferPorts = std::unordered_map>; + using BufferMap = std::unordered_map; /** * @brief Finds Buffer cluster in set of clusters which contains the target expression with Buffer * @param target target expression with Buffer op @@ -58,76 +60,72 @@ class DefineBufferClusters : public RangedPass { * @param target_expr expression with target op - LoopEnd or MemoryAccess op * @return boolean value */ - bool is_direct_buffer(const BufferExpressionPtr& buffer_expr, const ExpressionPtr& target_expr) const; + static bool is_direct_buffer(const BufferExpressionPtr& buffer_expr, const ExpressionPtr& target_expr); /** * @brief Creates new buffer cluster if buffer_exprs is missed in clusters. If buffer_exprs is already in clusters, do nothing * @param buffer_expr expression with Buffer op */ void create_new_cluster(const BufferExpressionPtr& buffer_expr); + /** + * @brief Add buffers to the existing clusters + * @param existing_cluster existing clusters + * @param buffers buffers which will be added to the existing cluster + */ + static void add_buffers_to_cluster(BufferCluster& existing_cluster, const std::set& buffers); /** * @brief Returns common ID of cluster if all buffer inside have the same Buffer ID. Otherwise returns the default value SIZE_MAX * that means that Buffers in cluster have different IDs. * @param cluster set of Buffer expressions - cluster * @return common buffer ID or SIZE_MAX - size value */ - size_t get_cluster_buffer_id(const BufferCluster& cluster) const; + static size_t get_cluster_buffer_id(const BufferCluster& cluster); /** * @brief Analyzes Loop: if Loop has Buffer ops on inputs and outputs, Loop can read and write from/to the same memory. + * @param loop_manager loop manager * @param expr_it iterator of Linear IR which refers to the expression with LoopEnd */ - void parse_loop(const LinearIR::constExprIt& expr_it); + void parse_loop(const LoopManagerPtr& loop_manager, const LinearIR::constExprIt& expr_it); /** * @brief Analyzes full MemoryAccess op: if the op has Buffer ops on I/O, the op can read and write from/to the same memory. 
* @param expr expression with full MemoryAccess op */ void parse_memory_access_op(const ExpressionPtr& expr); /** - * @brief Gets input outputs buffers of Loop - * @param loop_expr expression with LoopEnd op - * @return unordered map [Expression -> set of input ports] which represents input Buffers of Loop + * @brief Find all direct buffers that are connected to the current Loop + * @param loop_info current unified loop info + * @param loop_expr the target LoopEnd expression + * @return input and output buffer maps */ - BufferPorts get_input_buffers(const ExpressionPtr& loop_expr) const; - /** - * @brief Gets output buffers of Loop - * @param loop_expr expression with LoopEnd op - * @return unordered map [Expression -> set of input ports] which represents output Buffers of Loop - */ - BufferPorts get_output_buffers(const ExpressionPtr& loop_expr) const; + static std::pair get_direct_buffers(const UnifiedLoopInfoPtr& loop_info, const ExpressionPtr& loop_expr); /** * @brief Analyzes nested Loops: unite nested buffer clusters if they can reproduce `window` sliding - * @param input_buffers unordered map [Expression -> set of input ports] which represents input Buffers of Loop - * @param output_buffers unordered map [Expression -> set of output ports (one)] which represents output Buffers of Loop + * @param loop_manager loop manager + * @param input_buffers unordered map [Expression -> LoopPortInfo] which represents input Buffers of Loop + * @param output_buffers unordered map [Expression -> LoopPortInfo] which represents output Buffers of Loop * @param outer_loop_end_expr_it iterator of Linear IR which refers to the expression with outer LoopEnd */ - void parse_nested_loops(const BufferPorts& input_buffers, const BufferPorts& output_buffers, const LinearIR::constExprIt& outer_loop_end_expr_it); + void parse_nested_loops(const LoopManagerPtr& loop_manager, const BufferMap& input_buffers, const BufferMap& output_buffers, + const LinearIR::constExprIt& outer_loop_end_expr_it); /** - * @brief Finds the last connected Loop to the target Buffer and returns the corresponding finalization offset + * @brief Finds the last connected Loop to the target Buffer and init the corresponding loop port info + * @param loop_manager loop manager * @param buffer_expr expression with Buffer op - * @return finalization offset - int64_t value - */ - int64_t get_buffer_finalization_offset(const BufferExpressionPtr& buffer_expr) const; - /** - * @brief Check if two Buffer expressions are connected to the same Loop. Set common LoopEnd as `loop` parameter and - * indexes of Loop ports `up_idx` and `down_idx` if Buffers are really neighbours - * @param up expression with upper Buffer op - * @param down expression with lower Buffer op - * @param loop expression with common LoopEnd op - * @param up_idx the reference to port index of upper Buffer op to the Loop - * @param down_idx the reference to port index of lower Buffer op to the Loop - * @return Return True if the Buffers are connected to the same Loop + * @param port_info target loop port info to be initialized + * @return status - True if loop port has been found. Otherwise, return false - not connected to the Loop. 
*/ - static bool are_buffer_neighbours(const BufferExpressionPtr& up, const BufferExpressionPtr& down, ExpressionPtr& loop, - size_t& up_idx, size_t& down_idx); + static bool init_buffer_last_loop_port_info(const LoopManagerPtr& loop_manager, const BufferExpressionPtr& buffer_expr, + UnifiedLoopInfo::LoopPortInfo& port_info); /** * @brief Unite clusters + * @param loop_manager loop manager * @param inner_cluster_it iterator to inner cluster - buffer cluster is in the loop * @param outer_cluster buffer clusters with buffers outside the Loop * @param outer_buffer target Buffer from outer_cluster * @param is_outer_up true if outer buffer is upper in Linear IR than inner Buffers * @return Return True if clusters have been united */ - bool unite_nested_clusters(const BufferClusters::iterator& inner_cluster_it, BufferCluster& outer_cluster, + bool unite_nested_clusters(const LoopManagerPtr& loop_manager, const BufferClusters::iterator& inner_cluster_it, BufferCluster& outer_cluster, const BufferExpressionPtr& outer_buffer, bool is_outer_up); BufferClusters m_clusters; diff --git a/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp b/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp new file mode 100644 index 00000000000000..6a31a697baca77 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp @@ -0,0 +1,65 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface MarkInvariantShapePath + * @brief The helper pass for BufferAllocation pipeline: + * - Many buffer-relates passes (SetBufferRegGroup, DefineBufferClusters) depend on loop pointer increments. + * The increments are unknown in dynamic case, so these passes can't set reg groups and clusters efficiently. + * The current pass marks expressions port which will have the same shape. The shape and layout means + * the same loop pointer arithmetic in runtime. + * @ingroup snippets + */ +class MarkInvariantShapePath: public RangedPass { +public: + OPENVINO_RTTI("MarkInvariantShapePath", "RangedPass") + MarkInvariantShapePath() = default; + + /** + * @brief Apply the pass to the Linear IR + * @param linear_ir the target Linear IR + * @return status of the pass + */ + bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; + + /** + * @brief Returns ID (color) of the current Invariant Shape path for the passed port. + * Ports which have the same IDs of the paths - will have the same shapes in runtime. + * Note: if passed port is input port, the method returns value for source of port connector + * for the passed port. Because the shape is created by output ports of expressions. + * @param port target expression port + * @return ID + */ + static size_t getInvariantPortShapePath(const ExpressionPort& port); + +private: + /** + * @brief Sets ID (color) of the current Invariant Shape path for the passed output port. + * Ports which have the same IDs of the paths - will have the same shapes in runtime. 
+ * @param port target expression port + * @param value ID of the path (color) + */ + static void SetInvariantPortShapePath(const ExpressionPort& port, size_t value); + + /** + * @brief Return runtime info for the passed expression port + * @param port target expression port + * @return runtime info map + */ + static ov::RTMap& get_rt_info(const ExpressionPort& port); +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp index cba3f28856be42..dc22ce4beff1a0 100644 --- a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp @@ -6,6 +6,7 @@ #include "pass.hpp" +#include "snippets/lowered/loop_info.hpp" #include "snippets/utils/utils.hpp" namespace ov { @@ -38,34 +39,19 @@ class SetBufferRegGroup: public RangedPass { * @param linear_ir the target Linear IR * @return status of the pass */ - bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; - - struct ShiftPtrParams { - ShiftPtrParams() = default; - ShiftPtrParams(int64_t ds, int64_t pi, int64_t fo) : data_size(ds), ptr_increment(pi), finalization_offset(fo) {} - int64_t data_size = 0; - int64_t ptr_increment = 0; - int64_t finalization_offset = 0; - - inline bool is_static() const { - return !utils::is_dynamic_value(ptr_increment) && !utils::is_dynamic_value(finalization_offset); - } - - friend bool operator==(const ShiftPtrParams& lhs, const ShiftPtrParams& rhs); - friend bool operator!=(const ShiftPtrParams& lhs, const ShiftPtrParams& rhs); - }; + bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; /** - * @brief Check if two Buffers can be in one register group by ShiftPtrParams < data_size, ptr_increment, finalization_offset > - * @param lhs Data pointer shift params for first Buffer - * @param rhs Data pointer shift params for second Buffer + * @brief Check if two Buffers can be in one register group by LoopDesc < data_size, ptr_increment, finalization_offset > + * @param lhs LoopPortInfo (Port and Data pointer shift params for first Buffer) + * @param rhs LoopPortInfo (Port and Data pointer shift params for second Buffer) * @return Returns True if params are valid to reuse one register. Otherwise returns False */ - static bool can_be_in_one_group(const ShiftPtrParams& lhs, const ShiftPtrParams& rhs); + static bool can_be_in_one_reg_group(const UnifiedLoopInfo::LoopPortInfo& lhs, const UnifiedLoopInfo::LoopPortInfo& rhs); private: using BufferPool = std::vector; - using BufferMap = std::map; + using BufferMap = std::map; /** * @brief Get Buffer Index in Buffer set @@ -76,11 +62,14 @@ class SetBufferRegGroup: public RangedPass { static size_t get_buffer_idx(const BufferExpressionPtr& target, const BufferPool& pool); /** * @brief Create adjacency matrix for Buffer system. See comment in the method for more details. 
- * @param linear_ir the target Linear IR + * @param loop_manager the loop manager + * @param begin begin iterator + * @param end end iterator * @param pool set of Buffers from the Linear IR * @return adjacency matrix where True value means that Buffers are adjacent and cannot have the same ID */ - static std::vector create_adjacency_matrix(lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end, const BufferPool& pool); + static std::vector create_adjacency_matrix(const LoopManagerPtr& loop_manager, LinearIR::constExprIt begin, LinearIR::constExprIt end, + const BufferPool& pool); /** * @brief Algorithm of Graph coloring where vertices are Buffers * @param buffers set of Buffers from the Linear IR @@ -99,25 +88,23 @@ class SetBufferRegGroup: public RangedPass { * @param buffers set of Buffers from the Linear IR * @param adj Target adjacency matrix */ - static void update_adj_matrix(const std::pair& lhs, - const std::pair& rhs, - const BufferPool& buffers, + static void update_adj_matrix(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs, const BufferPool& buffers, std::vector& adj); + /** * @brief Check if two Buffers are adjacent and cannot have the same ID - * @param lhs Pair where first value is Expression with first Buffer and second value is data pointer shift params for it - * @param rhs Pair where first value is Expression with second Buffer and second value is data pointer shift params for it + * @param lhs LoopPortInfo (Port and Data pointer shift params for first Buffer) + * @param rhs LoopPortInfo (Port and Data pointer shift params for second Buffer) * @return Returns True if they are adjacent, otherwise returns False */ - static bool are_adjacent(const std::pair& lhs, - const std::pair& rhs); + static bool are_adjacent(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs); /** - * @brief Find all buffers that are connected to the current LoopEnd - * @param loop_end_expr expression of the target LoopEnd - * @return buffer map [buffer expr -> ShiftDataPtrs] + * @brief Find all buffers that are connected to the current Loop + * @param loop_info current unified loop info + * @return buffer map */ - static BufferMap get_buffer_loop_neighbours(const ExpressionPtr& loop_end_expr); + static BufferMap get_buffer_loop_neighbours(const UnifiedLoopInfoPtr& loop_info); /** * @brief Find all buffers that are inside the current Loop. 
* @param loop_end_it expression iterator in LinearIR of the target LoopEnd diff --git a/src/common/snippets/include/snippets/utils/utils.hpp b/src/common/snippets/include/snippets/utils/utils.hpp index f7e584d48a905c..ff4646f24d03b7 100644 --- a/src/common/snippets/include/snippets/utils/utils.hpp +++ b/src/common/snippets/include/snippets/utils/utils.hpp @@ -91,6 +91,12 @@ static inline auto rnd_up(const T lhs, const U rhs) -> decltype(div_up(lhs, rhs) return div_up_res * rhs; } +static inline bool is_planar_layout(const std::vector& order) { + for (size_t i = 0; i < order.size(); ++i) + if (order[i] != i) return false; + return true; +} + inline bool is_dynamic_vdims(const VectorDims& shape) { return std::any_of(shape.cbegin(), shape.cend(), [](size_t v){ return is_dynamic_value(v); }); } diff --git a/src/common/snippets/src/lowered/expression.cpp b/src/common/snippets/src/lowered/expression.cpp index aaa71612cef706..1952b93017aab5 100644 --- a/src/common/snippets/src/lowered/expression.cpp +++ b/src/common/snippets/src/lowered/expression.cpp @@ -170,11 +170,6 @@ ExpressionPtr Expression::clone() const { } bool Expression::visit_attributes(AttributeVisitor &visitor) { - auto is_planar_layout = [](const std::vector& layout) { - for (size_t i = 0; i < layout.size(); ++i) - if (layout[i] != i) return false; - return true; - }; auto subtensor2str = [](const VectorDims& subtensor) { std::stringstream ss; for (size_t i = 0; i < subtensor.size(); ++i) { @@ -203,7 +198,7 @@ bool Expression::visit_attributes(AttributeVisitor &visitor) { subtensors.emplace_back("in_subtensor_" + std::to_string(i), subtensor2str(subtensor)); const auto& layout = desc->get_layout(); - if (!layout.empty() && !is_planar_layout(layout)) + if (!layout.empty() && !utils::is_planar_layout(layout)) layouts.emplace_back("in_layout_" + std::to_string(i), layout); in_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); @@ -220,7 +215,7 @@ bool Expression::visit_attributes(AttributeVisitor &visitor) { subtensors.emplace_back("out_subtensor_" + std::to_string(i), subtensor2str(subtensor)); const auto& layout = desc->get_layout(); - if (!layout.empty() && !is_planar_layout(layout)) + if (!layout.empty() && !utils::is_planar_layout(layout)) layouts.emplace_back("out_layout_" + std::to_string(i), layout); out_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); diff --git a/src/common/snippets/src/lowered/expressions/buffer_expression.cpp b/src/common/snippets/src/lowered/expressions/buffer_expression.cpp index acc742ff196407..a8b3bb2034b105 100644 --- a/src/common/snippets/src/lowered/expressions/buffer_expression.cpp +++ b/src/common/snippets/src/lowered/expressions/buffer_expression.cpp @@ -25,12 +25,15 @@ ExpressionPtr BufferExpression::clone() const { } bool BufferExpression::visit_attributes(AttributeVisitor &visitor) { + Expression::visit_attributes(visitor); auto allocation_size = utils::value2str(m_allocation_size); auto offset = utils::value2str(m_offset); + auto prc = get_data_type(); visitor.on_attribute("allocation_size", allocation_size); visitor.on_attribute("offset", offset); visitor.on_attribute("reg_group", m_reg_group); visitor.on_attribute("cluster_id", m_cluster_id); + visitor.on_attribute("data_type", prc); return true; } @@ -38,9 +41,13 @@ bool BufferExpression::is_defined() const { return !utils::is_dynamic_value(m_allocation_size); } +ov::element::Type BufferExpression::get_data_type() const { + return get_node()->get_output_element_type(0); +} + size_t BufferExpression::get_byte_size() const 
{ if (is_defined()) - return m_allocation_size * get_node()->get_output_element_type(0).size(); + return m_allocation_size * get_data_type().size(); return utils::get_dynamic_value(); } diff --git a/src/common/snippets/src/lowered/loop_info.cpp b/src/common/snippets/src/lowered/loop_info.cpp index 954cc180a1527b..1c856869878b80 100644 --- a/src/common/snippets/src/lowered/loop_info.cpp +++ b/src/common/snippets/src/lowered/loop_info.cpp @@ -99,7 +99,7 @@ template<> std::vector::iterator LoopInfo::find_loop_port(const ExpressionPort& expr_port) { auto& ports = expr_port.get_type() == ExpressionPort::Input ? m_input_ports : m_output_ports; const auto it = std::find_if(ports.begin(), ports.end(), - [&expr_port](const LoopPort& port) { return *port.expr_port.get() == expr_port; }); + [&expr_port](const LoopPort& port) { return *port.expr_port == expr_port; }); return it; } @@ -176,6 +176,19 @@ bool UnifiedLoopInfo::LoopPortDesc::is_dynamic() const { return utils::is_dynamic_value(ptr_increment) || utils::is_dynamic_value(finalization_offset); } +bool UnifiedLoopInfo::LoopPortDesc::is_static() const { + return !is_dynamic(); +} + +bool operator==(const UnifiedLoopInfo::LoopPortDesc& lhs, const UnifiedLoopInfo::LoopPortDesc& rhs) { + if (&lhs == &rhs) + return true; + return lhs.ptr_increment == rhs.ptr_increment && lhs.finalization_offset == rhs.finalization_offset && lhs.data_size == rhs.data_size; +} +bool operator!=(const UnifiedLoopInfo::LoopPortDesc& lhs, const UnifiedLoopInfo::LoopPortDesc& rhs) { + return !(rhs == lhs); +} + UnifiedLoopInfo::UnifiedLoopInfo(size_t work_amount, size_t increment, const std::vector& entries, const std::vector& exits, const SpecificIterationHandlers& handlers) @@ -321,6 +334,18 @@ void UnifiedLoopInfo::sort_ports() { reorder(m_output_ports, m_output_port_descs); } +UnifiedLoopInfo::LoopPortInfo UnifiedLoopInfo::get_loop_port_info(const ExpressionPort& expr_port) { + OPENVINO_ASSERT(is_loop_port(expr_port), "Failed get_loop_port: expr_port is not a loop port"); + const auto is_input = expr_port.get_type() == ExpressionPort::Input; + const auto& ports = is_input ? m_input_ports : m_output_ports; + const auto& descs = is_input ? m_input_port_descs : m_output_port_descs; + const auto it = std::find_if(ports.begin(), ports.end(), + [&expr_port](const LoopPort& port) { return *port.expr_port == expr_port; }); + const auto index = static_cast(std::distance(ports.cbegin(), it)); + OPENVINO_ASSERT(index < ports.size() && index < descs.size(), "LoopPortInfo has not been found!"); + return {ports[index], descs[index]}; +} + void UnifiedLoopInfo::replace_with_cloned_descs(size_t actual_port_idx, size_t new_count, bool is_input) { auto& descs = is_input ? 
m_input_port_descs : m_output_port_descs; std::vector target_shifts(new_count, descs[actual_port_idx]); diff --git a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp index f76c4097b38f38..0269c65109b701 100644 --- a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp @@ -12,7 +12,7 @@ #include "snippets/lowered/pass/define_buffer_clusters.hpp" #include "snippets/lowered/pass/normalize_buffer_reg_groups.hpp" #include "snippets/lowered/pass/propagate_buffer_offset.hpp" -#include "snippets/pass/tokenization.hpp" +#include "snippets/lowered/pass/mark_invariant_shape_path.hpp" #include "snippets/itt.hpp" #include "snippets/utils/utils.hpp" @@ -30,6 +30,7 @@ bool AllocateBuffers::run(lowered::LinearIR& linear_ir, lowered::LinearIR::const PassPipeline pipeline; pipeline.register_pass(); if (m_is_optimized_mode) { + pipeline.register_pass(); pipeline.register_pass(); pipeline.register_pass(); pipeline.register_pass(buffer_scratchpad_size); diff --git a/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp b/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp index c43b5d63a358c6..e8132d62be0cc9 100644 --- a/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp +++ b/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp @@ -4,24 +4,42 @@ #include "snippets/lowered/pass/define_buffer_clusters.hpp" +#include "snippets/lowered/pass/mark_invariant_shape_path.hpp" #include "snippets/lowered/pass/set_buffer_reg_group.hpp" +#include "snippets/lowered/loop_manager.hpp" #include "snippets/snippets_isa.hpp" #include "snippets/utils/utils.hpp" #include "snippets/itt.hpp" + namespace ov { namespace snippets { namespace lowered { namespace pass { -using ShiftPtrParams = SetBufferRegGroup::ShiftPtrParams; +namespace { + +// Find Loops which are connected to the current `buffer_expr` (consumer of Buffer is port of these Loops) +std::vector get_connected_loops(const BufferExpressionPtr& buffer_expr, const ExpressionPtr& consumer_expr) { + // [133463] Remove it please + if (ov::is_type(consumer_expr->get_node())) + return {}; + const auto& buffer_loops_ids = buffer_expr->get_loop_ids(); + const auto& consumer_loop_ids = consumer_expr->get_loop_ids(); + OPENVINO_ASSERT(buffer_loops_ids.size() <= consumer_loop_ids.size(), "Buffer with consumer are in incorrect loops"); + const auto mismatched_its = std::mismatch(buffer_loops_ids.begin(), buffer_loops_ids.end(), consumer_loop_ids.begin()); + return {mismatched_its.second, consumer_loop_ids.cend()}; +} +} // namespace + +using LoopPortInfo = UnifiedLoopInfo::LoopPortInfo; DefineBufferClusters::BufferClusters::iterator DefineBufferClusters::find_cluster_by_expr(const BufferExpressionPtr& target) { return std::find_if(m_clusters.begin(), m_clusters.end(), [&target](const BufferCluster& cluster) { return cluster.count(target) > 0; }); } -bool DefineBufferClusters::is_direct_buffer(const BufferExpressionPtr& buffer_expr, const ExpressionPtr& target_expr) const { +bool DefineBufferClusters::is_direct_buffer(const BufferExpressionPtr& buffer_expr, const ExpressionPtr& target_expr) { return buffer_expr && buffer_expr->get_loop_ids() == target_expr->get_loop_ids(); } @@ -33,7 +51,16 @@ void DefineBufferClusters::create_new_cluster(const BufferExpressionPtr& buffer_ } } -size_t DefineBufferClusters::get_cluster_buffer_id(const BufferCluster& cluster) const { +void 
DefineBufferClusters::add_buffers_to_cluster(BufferCluster& existing_cluster, const std::set& buffers) { + existing_cluster.insert(buffers.cbegin(), buffers.cend()); + // All buffers in one cluster must be only static or dynamic (no mixes). + if (std::any_of(existing_cluster.cbegin(), existing_cluster.cend(), [](const BufferExpressionPtr& buffer) { return !buffer->is_defined(); })) { + for (const auto& buffer : existing_cluster) + buffer->set_allocation_size(utils::get_dynamic_value()); + } +} + +size_t DefineBufferClusters::get_cluster_buffer_id(const BufferCluster& cluster) { OPENVINO_ASSERT(!cluster.empty(), "Buffer cluster is empty!"); const auto id = cluster.cbegin()->get()->get_reg_group(); if (std::all_of(cluster.cbegin(), cluster.cend(), [&id](const BufferExpressionPtr& expr) { return expr->get_reg_group() == id; })) { @@ -42,141 +69,121 @@ size_t DefineBufferClusters::get_cluster_buffer_id(const BufferCluster& cluster) return SIZE_MAX; } -DefineBufferClusters::BufferPorts DefineBufferClusters::get_input_buffers(const ExpressionPtr& loop_expr) const { - BufferPorts input_buffers; - - const auto loop_end = ov::as_type_ptr(loop_expr->get_node()); - const auto in_count = loop_end->get_input_num(); - const auto& connectors = loop_expr->get_input_port_connectors(); - - // Input Buffers - for (size_t i = 0; i < in_count; ++i) { - const auto& source_expr = ov::as_type_ptr(connectors[i]->get_source().get_expr()); - if (!is_direct_buffer(source_expr, loop_expr)) +std::pair DefineBufferClusters::get_direct_buffers(const UnifiedLoopInfoPtr& loop_info, + const ExpressionPtr& loop_expr) { + BufferMap input_buffers; + const auto& loop_inputs = loop_info->get_input_ports_info(); + for (const auto& port_info : loop_inputs) { + const auto& buffer_expr = ov::as_type_ptr(port_info.port.expr_port->get_port_connector_ptr()->get_source().get_expr()); + if (!is_direct_buffer(buffer_expr, loop_expr)) + continue; + if (input_buffers.count(buffer_expr) > 0) { + const auto& port_desc = port_info.desc; + OPENVINO_ASSERT(input_buffers[buffer_expr].desc == port_desc, + "Invalid data pointer shifts: If Buffer has several consumers, this consumers must have the same shifts or zero"); continue; - // Save as input Buffer - const auto ret = input_buffers.insert(std::make_pair(source_expr, std::set{ i })).second; - if (!ret) - input_buffers[source_expr].insert(i); + } + input_buffers[buffer_expr] = port_info; } - return input_buffers; -} - -DefineBufferClusters::BufferPorts DefineBufferClusters::get_output_buffers(const ExpressionPtr& loop_expr) const { - BufferPorts output_buffers; - const auto loop_end = ov::as_type_ptr(loop_expr->get_node()); - const auto in_count = loop_end->get_input_num(); - const auto out_count = loop_end->get_output_num(); - const auto& connectors = loop_expr->get_input_port_connectors(); - - for (size_t i = in_count; i < in_count + out_count; ++i) { - for (const auto& consumer : connectors[i]->get_consumers()) { - const auto& consumer_expr = ov::as_type_ptr(consumer.get_expr()); - if (!is_direct_buffer(consumer_expr, loop_expr)) + BufferMap output_buffers; + const auto& loop_outputs = loop_info->get_output_ports_info(); + for (const auto& port_info : loop_outputs) { + const auto& consumer_inputs = port_info.port.expr_port->get_port_connector_ptr()->get_consumers(); + for (const auto& consumer_input : consumer_inputs) { + const auto& buffer_expr = ov::as_type_ptr(consumer_input.get_expr()); + if (!is_direct_buffer(buffer_expr, loop_expr)) continue; - // Save as output Buffer - 
output_buffers[consumer_expr] = { i }; + OPENVINO_ASSERT(output_buffers.count(buffer_expr) == 0, "Only one Buffer can be on node output!"); + output_buffers[buffer_expr] = port_info; } } - return output_buffers; + + return std::make_pair(input_buffers, output_buffers); } -void DefineBufferClusters::parse_loop(const LinearIR::constExprIt& expr_it) { +void DefineBufferClusters::parse_loop(const LoopManagerPtr& loop_manager, const LinearIR::constExprIt& expr_it) { const auto& expr = *expr_it; - const auto loop_end = ov::as_type_ptr(expr->get_node()); - const auto& ptr_increments = loop_end->get_ptr_increments(); - const auto& final_offsets = loop_end->get_finalization_offsets(); - const auto& data_sizes = loop_end->get_element_type_sizes(); + const auto& loop_end = ov::as_type_ptr(expr->get_node()); + const auto& loop_info = loop_manager->get_loop_info(loop_end->get_id()); - // [ Expression -> Port indexes ] - const auto input_buffers = get_input_buffers(expr); - const auto output_buffers = get_output_buffers(expr); + BufferMap input_buffers, output_buffers; + std::tie(input_buffers, output_buffers) = get_direct_buffers(loop_info, expr); for (const auto& in : input_buffers) create_new_cluster(in.first); std::set visited_buffers; for (const auto& out : output_buffers) { - const auto output_buffer_expr = out.first; - const auto output_buffer_port_idx = *(out.second.cbegin()); // Output port is always one + const auto& output_buffer_expr = out.first; + const auto& output_buffer_port_info = out.second; bool has_been_added = false; for (const auto& in : input_buffers) { const auto& input_buffer_expr = in.first; + const auto& input_buffer_port_info = in.second; if (visited_buffers.count(input_buffer_expr) > 0) continue; - // If allocated sizes of buffers are unkown on compilation stage (dynamic), - // we cannot be sure that they're will be the same in runtime. - if (!input_buffer_expr->is_defined()|| !output_buffer_expr->is_defined()) + // Memory can be reused if reading and writing are executed proportionally: + // - output buffer can have precision with data size less than input buffer + if ((input_buffer_expr->get_data_type().size() < output_buffer_expr->get_data_type().size())) continue; - // Memory can be reused if reading and writing are executed proportionally: - // - the same reading/writing order - // - the same buffer memory sizes - if ((input_buffer_expr->get_byte_size() != output_buffer_expr->get_byte_size()) || - (input_buffer_expr->get_output_port_descriptor(0)->get_layout() != output_buffer_expr->get_input_port_descriptor(0)->get_layout())) + const auto in_path = MarkInvariantShapePath::getInvariantPortShapePath(*input_buffer_port_info.port.expr_port); + const auto out_path = MarkInvariantShapePath::getInvariantPortShapePath(*output_buffer_port_info.port.expr_port); + // - Memory can be reused if there are the same loop pointer increments (data size, final offsets, ptr increments). + // For that, loop ports with buffers should be on the same shape-path and have the same value of `is_incremented`. 
+ if (in_path != out_path || input_buffer_port_info.port.is_incremented != output_buffer_port_info.port.is_incremented) continue; - // Also memory can be reused if there are the same ShiftPtrParams (data size, final offsets, ptr increments) - const auto& input_buffer_ports = in.second; - for (const auto& input_buffer_port_idx : input_buffer_ports) { - const auto input_params = - ShiftPtrParams(data_sizes[input_buffer_port_idx], ptr_increments[input_buffer_port_idx], final_offsets[input_buffer_port_idx]); - const auto output_params = - ShiftPtrParams(data_sizes[output_buffer_port_idx], ptr_increments[output_buffer_port_idx], final_offsets[output_buffer_port_idx]); - - // If data pointer shift parameters are unknown on model compilation stage (dynamic), - // we cannot be sure that these data pointers will be proportionally shifted in runtime. - if (input_params.is_static() && output_params.is_static() && input_params == output_params) { - const auto cluster_it = find_cluster_by_expr(input_buffer_expr); - OPENVINO_ASSERT(cluster_it != m_clusters.end(), "Buffer on inputs of Loop must be already saved in clusters"); - // Add to the existing cluster - has_been_added = cluster_it->insert(output_buffer_expr).second; - OPENVINO_ASSERT(has_been_added, "Buffer has not been saved in cluster"); - // Remove input buffer because we have already use its memory - visited_buffers.insert(input_buffer_expr); - break; - } + // - Memory can be shared if Buffer has the same allocation size. + if (input_buffer_expr->is_defined() && output_buffer_expr->is_defined()) { + if (input_buffer_expr->get_allocation_size() != output_buffer_expr->get_allocation_size()) + continue; + } else { + // If allocation sizes are undefined, we can check if they have the same allocation sizes in runtime: + // - they should calculate allocation size using the common algorithm from `BufferExpression::init_allocation_size`. 
+ if (!utils::everyone_is(BufferExpression::get_type_info_static(), input_buffer_expr->get_type_info(), output_buffer_expr->get_type_info())) + continue; } - if (has_been_added) break; + + const auto cluster_it = find_cluster_by_expr(input_buffer_expr); + OPENVINO_ASSERT(cluster_it != m_clusters.end(), "Buffer on inputs of Loop must be already saved in clusters"); + // Add to the existing cluster + add_buffers_to_cluster(*cluster_it, {output_buffer_expr}); + // Remove input buffer because we have already use its memory + visited_buffers.insert(input_buffer_expr); + has_been_added = true; + break; } if (!has_been_added) { - m_clusters.push_back(BufferCluster{output_buffer_expr}); + create_new_cluster(output_buffer_expr); } } // Check Buffers inside to possible memory reusing using `window` sliding - parse_nested_loops(input_buffers, output_buffers, expr_it); + parse_nested_loops(loop_manager, input_buffers, output_buffers, expr_it); } -void DefineBufferClusters::parse_nested_loops(const BufferPorts& input_buffers, const BufferPorts& output_buffers, - const LinearIR::constExprIt& outer_loop_end_expr_it) { +void DefineBufferClusters::parse_nested_loops(const LoopManagerPtr& loop_manager, const BufferMap& input_buffers, + const BufferMap& output_buffers, const LinearIR::constExprIt& outer_loop_end_expr_it) { if (input_buffers.empty() && output_buffers.empty()) return; - // The inner Buffer can reuse memory of the outer Buffer using `window` sliding only if: - // - The finalization offset of the latest Loop connected to the inner Buffer is equal to pointer increment of outer Buffer to emulate `window` sliding - // - This outer Buffer should have the same Buffer ID as inner to move data ptr of inner Buffer after each outer Loop iteration. - // It's needed because all Loops reset data pointers of connected Buffer after full work. - // To avoid rewriting of outer Buffer data we have to have the same Buffer ID (GPR) to proportionally shift pointers both Buffers. - - auto can_be_data_ptr_proportionally_shifted = [](int64_t outer_buffer_ptr_increment, int64_t outer_buffer_data_size, - int64_t inner_buffer_final_offsets, int64_t inner_buffer_data_size) { - // If data pointer shift parameters are unknown on model compilation stage (dynamic), - // we cannot be sure that these data pointers will be proportionally shifted in runtime. 
- if (utils::is_dynamic_value(outer_buffer_ptr_increment) || utils::is_dynamic_value(inner_buffer_final_offsets)) + auto can_be_data_ptr_proportionally_shifted = [](const LoopPortInfo& outer_port_info, const LoopPortInfo& inner_port_info) { + // Outer Buffer ptr should be shifted to emulate "window" sliding + const auto& outer_desc = outer_port_info.desc; + if (!outer_port_info.port.is_incremented || (!utils::is_dynamic_value(outer_desc.ptr_increment) && outer_desc.ptr_increment == 0)) return false; - return (outer_buffer_ptr_increment != 0) && - ((inner_buffer_data_size * inner_buffer_final_offsets * -1) == outer_buffer_ptr_increment * outer_buffer_data_size); - }; - const auto outer_loop_end = ov::as_type_ptr(outer_loop_end_expr_it->get()->get_node()); - const auto outer_loop_begin = outer_loop_end->get_loop_begin(); - const auto& outer_ptr_increments = outer_loop_end->get_ptr_increments(); - const auto& outer_data_sizes = outer_loop_end->get_element_type_sizes(); + OPENVINO_ASSERT(inner_port_info.port.expr_port && outer_port_info.port.expr_port, "Expression ports are nullptr!"); + // we can be sure that these data pointers will be proportionally shifted if they're on the same invariant shape path + return MarkInvariantShapePath::getInvariantPortShapePath(*inner_port_info.port.expr_port) == + MarkInvariantShapePath::getInvariantPortShapePath(*outer_port_info.port.expr_port); + }; + const auto outer_loop_begin = ov::as_type_ptr(outer_loop_end_expr_it->get()->get_node())->get_loop_begin(); for (auto it = std::reverse_iterator(outer_loop_end_expr_it); (*it)->get_node() != outer_loop_begin; ++it) { const auto& inner_expr = *it; if (const auto inner_buffer_expr = ov::as_type_ptr(inner_expr)) { @@ -185,9 +192,12 @@ void DefineBufferClusters::parse_nested_loops(const BufferPorts& input_buffers, const auto inner_cluster_id = get_cluster_buffer_id(*inner_cluster_it); if (inner_cluster_id == SIZE_MAX) continue; - const auto final_offset = get_buffer_finalization_offset(inner_buffer_expr); + // If inner Buffer is not connected to the Loop - `window` sliding effect is not possible + LoopPortInfo final_loop_info; + if (!init_buffer_last_loop_port_info(loop_manager, inner_buffer_expr, final_loop_info)) + continue; - auto unite = [&](const BufferPorts& ports, const bool is_input) { + auto unite = [&](const BufferMap& ports, const bool is_input) { bool applied = false; for (const auto& port : ports) { const auto cluster_it = find_cluster_by_expr(port.first); @@ -196,17 +206,15 @@ void DefineBufferClusters::parse_nested_loops(const BufferPorts& input_buffers, if (cluster_it == inner_cluster_it) continue; // Buffer from one cluster must be only defined (with known allocation_size) or dynamic (with unknown allocation_size) if (inner_buffer_expr->is_defined() != port.first->is_defined()) continue; - - bool can_be_reused = true; - for (const auto idx : port.second) { - can_be_reused = can_be_reused && - can_be_data_ptr_proportionally_shifted(outer_ptr_increments[idx], outer_data_sizes[idx], - final_offset, inner_buffer_expr->get_node()->get_element_type().size()); - } - if (!can_be_reused) - continue; - - applied = unite_nested_clusters(inner_cluster_it, *cluster_it, port.first, is_input); + // The inner Buffer can reuse memory of the outer Buffer using `window` sliding only if: + // - The finalization offset of the latest Loop connected to the inner Buffer is equal to + // pointer increment of outer Buffer to emulate `window` sliding + // - This outer Buffer should have the same Buffer ID as inner to move 
data ptr of inner Buffer after each outer Loop iteration. + // It's needed because all Loops reset data pointers of connected Buffer after full work. + // To avoid rewriting of outer Buffer data we have to have the same Buffer ID (GPR) to proportionally shift pointers both Buffers. + if (!can_be_data_ptr_proportionally_shifted(port.second, final_loop_info)) continue; + + applied = unite_nested_clusters(loop_manager, inner_cluster_it, *cluster_it, port.first, is_input); if (applied) break; } return applied; @@ -218,101 +226,66 @@ void DefineBufferClusters::parse_nested_loops(const BufferPorts& input_buffers, } } -int64_t DefineBufferClusters::get_buffer_finalization_offset(const BufferExpressionPtr& buffer_expr) const { - auto index = [](const std::vector& loop_inputs, const PortConnectorPtr& buffer_out) { - const auto it = std::find(loop_inputs.cbegin(), loop_inputs.cend(), buffer_out); - OPENVINO_ASSERT(it != loop_inputs.cend(), "Buffer output PortConnector has not been found in target LoopEnd inputs"); - return std::distance(loop_inputs.cbegin(), it); +bool DefineBufferClusters::init_buffer_last_loop_port_info(const LoopManagerPtr& loop_manager, const BufferExpressionPtr& buffer_expr, + UnifiedLoopInfo::LoopPortInfo& port_info) { + auto get_direct_loop_for_buffer_out = [&](const BufferExpressionPtr& buffer_expr, const ExpressionPtr& consumer_expr) -> UnifiedLoopInfoPtr { + const auto inner_loops = get_connected_loops(buffer_expr, consumer_expr); + if (inner_loops.empty()) + return nullptr; + return loop_manager->get_loop_info(inner_loops.front()); }; - int64_t final_offset = 0; + + bool found = false; double last_loop_exec_order = -1 * std::numeric_limits::max(); const auto& buffer_outs = buffer_expr->get_output_port_connectors(); for (const auto& buffer_out : buffer_outs) { const auto consumers = buffer_out->get_consumers(); for (const auto& consumer : consumers) { - const auto consumer_expr = consumer.get_expr(); - const auto loop_end = ov::as_type_ptr(consumer_expr->get_node()); - if (loop_end && consumer_expr->get_loop_ids() == buffer_expr->get_loop_ids()) { - const auto loop_order = consumer_expr->get_exec_num(); + if (const auto& direct_loop = get_direct_loop_for_buffer_out(buffer_expr, consumer.get_expr())) { + const auto loop_order = direct_loop->get_output_ports().back().expr_port->get_expr()->get_exec_num(); if (loop_order > last_loop_exec_order) { - const auto& loop_inputs = consumer_expr->get_input_port_connectors(); - final_offset = loop_end->get_finalization_offsets()[index(loop_inputs, buffer_out)]; + OPENVINO_ASSERT(direct_loop->is_loop_port(consumer), "Consumer of Buffer from another loop must be loop port"); + port_info = direct_loop->get_loop_port_info(consumer); last_loop_exec_order = loop_order; + found = true; } } } } - return final_offset; + return found; } -bool DefineBufferClusters::unite_nested_clusters(const BufferClusters::iterator& inner_cluster_it, - BufferCluster& outer_cluster, - const BufferExpressionPtr& outer_buffer, bool is_outer_up) { +bool DefineBufferClusters::unite_nested_clusters(const LoopManagerPtr& loop_manager, const BufferClusters::iterator& inner_cluster_it, + BufferCluster& outer_cluster, const BufferExpressionPtr& outer_buffer, bool is_outer_up) { for (const auto& inner_buffer : *inner_cluster_it) { - ExpressionPtr common_loop_end_expr = nullptr; - size_t outer_idx = SIZE_MAX, inner_idx = SIZE_MAX; - const auto& up_buffer = is_outer_up ? outer_buffer : inner_buffer; - const auto& down_buffer = is_outer_up ? 
inner_buffer : outer_buffer; - auto& up_idx = is_outer_up ? outer_idx : inner_idx; - auto& down_idx = is_outer_up ? inner_idx : outer_idx; - if (are_buffer_neighbours(up_buffer, down_buffer, common_loop_end_expr, up_idx, down_idx)) { - const auto common_loop_end = ov::as_type_ptr(common_loop_end_expr->get_node()); - const auto& inner_ptr_increments = common_loop_end->get_ptr_increments(); - const auto& inner_final_offsets = common_loop_end->get_finalization_offsets(); - const auto& inner_data_sizes = common_loop_end->get_element_type_sizes(); - if (SetBufferRegGroup::can_be_in_one_group({ inner_data_sizes[up_idx], inner_ptr_increments[up_idx], inner_final_offsets[up_idx] }, - { inner_data_sizes[down_idx], inner_ptr_increments[down_idx], inner_final_offsets[down_idx] })) { - for (const auto& inner_buffer : *inner_cluster_it) - inner_buffer->set_reg_group(outer_buffer->get_reg_group()); - - outer_cluster.insert(inner_cluster_it->cbegin(), inner_cluster_it->cend()); - m_clusters.erase(inner_cluster_it); - return true; + const auto& upper_buffer = is_outer_up ? outer_buffer : inner_buffer; + const auto& lower_buffer = is_outer_up ? inner_buffer : outer_buffer; + + const auto& lower_buffer_source = lower_buffer->get_input_port_connector(0)->get_source(); + const auto& upper_buffer_consumers = upper_buffer->get_output_port_connector(0)->get_consumers(); + for (const auto& upper_buffer_consumer : upper_buffer_consumers) { + const auto& connected_loops = get_connected_loops(upper_buffer, upper_buffer_consumer.get_expr()); + for (const auto& loop_id : connected_loops) { + const auto& common_loop_info = loop_manager->get_loop_info(loop_id); + if (!common_loop_info->is_loop_port(lower_buffer_source) || !common_loop_info->is_loop_port(upper_buffer_consumer)) + continue; + + const auto upper_port_desc = common_loop_info->get_loop_port_info(upper_buffer_consumer); + const auto lower_port_desc = common_loop_info->get_loop_port_info(lower_buffer_source); + if (SetBufferRegGroup::can_be_in_one_reg_group(upper_port_desc, lower_port_desc)) { + for (const auto& inner_buffer : *inner_cluster_it) + inner_buffer->set_reg_group(outer_buffer->get_reg_group()); + + add_buffers_to_cluster(outer_cluster, *inner_cluster_it); + m_clusters.erase(inner_cluster_it); + return true; + } } } } return false; } -bool DefineBufferClusters::are_buffer_neighbours(const BufferExpressionPtr& up, const BufferExpressionPtr& down, ExpressionPtr& loop, - size_t& up_idx, size_t& down_idx) { - auto find_input = [&down](const PortConnectorPtr& in) { - return in->get_source().get_expr() == down; - }; - auto find_output = [&down](const PortConnectorPtr& in) { - const auto consumers = in->get_consumers(); - return std::any_of(consumers.cbegin(), consumers.cend(), - [&down](const ExpressionPort& port) { return port.get_expr() == down; }); - }; - auto find = [&](const std::vector::const_iterator& begin, - const std::vector::const_iterator& end, - const std::vector::const_iterator& orig_begin, - const ExpressionPort& loop_port, - bool is_input) -> bool { - const auto in_buffer_it = is_input ? 
std::find_if(begin, end, find_input) - : std::find_if(begin, end, find_output); - if (in_buffer_it != end) { - up_idx = loop_port.get_index(); - down_idx = std::distance(orig_begin, in_buffer_it); - loop = loop_port.get_expr(); - return true; - } - return false; - }; - for (const auto& out : up->get_output_port_connectors()) { - for (const auto& buffer_consumer : out->get_consumers()) { - const auto buffer_consumer_expr = buffer_consumer.get_expr(); - const auto loop_end = ov::as_type_ptr(buffer_consumer_expr->get_node()); - if (!loop_end) - continue; - const auto& loop_inputs = buffer_consumer_expr->get_input_port_connectors(); - if (find(loop_inputs.cbegin(), loop_inputs.cbegin() + loop_end->get_input_num(), loop_inputs.cbegin(), buffer_consumer, true)) return true; - if (find(loop_inputs.cbegin() + loop_end->get_input_num(), loop_inputs.cend(), loop_inputs.cbegin(), buffer_consumer, false)) return true; - } - } - return false; -} - void DefineBufferClusters::parse_memory_access_op(const ExpressionPtr& expr) { const auto ma = std::dynamic_pointer_cast(expr->get_node()); // TODO: Some full MemoryAccess ops can have inplace inputs and outputs in general. @@ -340,7 +313,7 @@ bool DefineBufferClusters::run(lowered::LinearIR& linear_ir, lowered::LinearIR:: const auto& expr = *expr_it; const auto op = expr->get_node(); if (ov::is_type(op)) { - parse_loop(expr_it); + parse_loop(linear_ir.get_loop_manager(), expr_it); continue; } diff --git a/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp b/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp new file mode 100644 index 00000000000000..b32056d4e32a57 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp @@ -0,0 +1,128 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + + +#include "snippets/lowered/pass/mark_invariant_shape_path.hpp" + +#include "snippets/lowered/expressions/buffer_expression.hpp" +#include "snippets/op/memory_access.hpp" +#include "snippets/snippets_isa.hpp" +#include "snippets/utils/utils.hpp" +#include "snippets/itt.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +namespace { + +// Specific value to mark ports which doesn't affect output shape of broadcastable ops. +// For example, ops with output scalar shape or Horizon ops. 
+static const size_t NOT_AFFECTING_PATH = SIZE_MAX; + +static bool is_shape_broadcastable_op(const ExpressionPtr& expr) { + return expr->get_node()->get_autob() != ov::op::AutoBroadcastType::NONE; +} + +static bool is_not_affecting_op(const ExpressionPtr& expr) { + const auto& node = expr->get_node(); + return ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node); +} + +static bool is_affecting_op(const ExpressionPtr& expr) { + const auto& node = expr->get_node(); + return ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node); +} +} // namespace + +size_t MarkInvariantShapePath::getInvariantPortShapePath(const ExpressionPort& port) { + auto& rt = get_rt_info(port); + const auto rinfo = rt.find("InvariantShapePath"); + OPENVINO_ASSERT(rinfo != rt.end(), "Invariant path for this expression port has not been marked!"); + return rinfo->second.as(); +} + +void MarkInvariantShapePath::SetInvariantPortShapePath(const ExpressionPort& port, size_t value) { + OPENVINO_ASSERT(port.get_type() == ExpressionPort::Output, "SetInvariantPortShapePath can be used only for output port"); + auto& rt = get_rt_info(port); + rt["InvariantShapePath"] = value; +} + +ov::RTMap& MarkInvariantShapePath::get_rt_info(const ExpressionPort& port) { + const auto& source_port = port.get_type() == ExpressionPort::Input ? port.get_port_connector_ptr()->get_source() : port; + const auto& node = source_port.get_expr()->get_node(); + const auto port_idx = source_port.get_index(); + OPENVINO_ASSERT(port_idx < node->get_output_size(), "Node has incompatible port count with the expression"); + return node->output(port_idx).get_rt_info(); +} + +bool MarkInvariantShapePath::run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::MarkInvariantShapePath"); + + bool modified = false; + + // Shape -> color + std::map colored_shapes; + + size_t color_path = 0; + + auto merge_paths = [&color_path](size_t lhs, size_t rhs) { + if (lhs == rhs || rhs == NOT_AFFECTING_PATH) return lhs; + if (lhs == NOT_AFFECTING_PATH) return rhs; + return ++color_path; + }; + + for (auto expr_it = begin; expr_it != end; ++expr_it) { + const auto& expr = *expr_it; + if (ov::is_type(expr->get_node())) + continue; + + for (size_t out_idx = 0; out_idx < expr->get_output_count(); ++out_idx) { + const auto& out_shape = expr->get_output_port_descriptor(out_idx)->get_shape(); + size_t current_color_path; + if (colored_shapes.count(out_shape)) { + current_color_path = colored_shapes.at(out_shape); + } else if (!utils::is_dynamic_vdims(out_shape) && ov::shape_size(out_shape) == 1) { + current_color_path = NOT_AFFECTING_PATH; + } else { + if (is_affecting_op(expr)) { + current_color_path = ++color_path; + } else if (is_not_affecting_op(expr)) { + current_color_path = NOT_AFFECTING_PATH; + } else if (is_shape_broadcastable_op(expr)) { + current_color_path = NOT_AFFECTING_PATH; + for (size_t in_idx = 0; in_idx < expr->get_input_count(); ++in_idx) { + const auto input_path = getInvariantPortShapePath(expr->get_input_port(in_idx)); + current_color_path = merge_paths(current_color_path, input_path); + } + } else { + current_color_path = expr->get_input_count() > 0 ? 
getInvariantPortShapePath(expr->get_input_port(0)) + : ++color_path; + } + + if (!utils::is_dynamic_vdims(out_shape)) + colored_shapes[out_shape] = current_color_path; + } + + SetInvariantPortShapePath(expr->get_output_port(out_idx), current_color_path); + modified = true; + } + } + + return modified; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp b/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp index 9bdb5e8ef3a9dc..46248b9c277818 100644 --- a/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp +++ b/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp @@ -4,8 +4,10 @@ #include "snippets/lowered/pass/set_buffer_reg_group.hpp" +#include "snippets/lowered/pass/mark_invariant_shape_path.hpp" #include "snippets/lowered/linear_ir.hpp" -#include "snippets/snippets_isa.hpp" +#include "snippets/lowered/loop_manager.hpp" +#include "snippets/lowered/expressions/buffer_expression.hpp" #include "snippets/itt.hpp" namespace ov { @@ -19,54 +21,44 @@ inline size_t index(size_t col_num, size_t row, size_t col) { } } // namespace -bool operator==(const SetBufferRegGroup::ShiftPtrParams& lhs, const SetBufferRegGroup::ShiftPtrParams& rhs) { - if (&lhs == &rhs) - return true; - return lhs.ptr_increment == rhs.ptr_increment && lhs.finalization_offset == rhs.finalization_offset && lhs.data_size == rhs.data_size; -} -bool operator!=(const SetBufferRegGroup::ShiftPtrParams& lhs, const SetBufferRegGroup::ShiftPtrParams& rhs) { - return !(rhs == lhs); -} - size_t SetBufferRegGroup::get_buffer_idx(const BufferExpressionPtr& target, const BufferPool& pool) { const auto iter = std::find(pool.cbegin(), pool.cend(), target); OPENVINO_ASSERT(iter != pool.cend(), "Buffer wasn't find in Buffer system of Subgraph"); return std::distance(pool.cbegin(), iter); } -bool SetBufferRegGroup::can_be_in_one_group(const ShiftPtrParams& lhs, const ShiftPtrParams& rhs) { - // If data pointer shift parameters are unknown on model compilation stage (dynamic), - // we cannot be sure that these data pointers will be proportionally shifted. 
- // Then we force `false` value here to set unique registers for these buffers - const auto are_static = lhs.is_static() && rhs.is_static(); - const auto equal_ptr_params_shifting = lhs.ptr_increment == rhs.ptr_increment && lhs.finalization_offset == rhs.finalization_offset; - const auto equal_element_type_sizes = lhs.data_size == rhs.data_size; - return are_static && equal_ptr_params_shifting && (equal_element_type_sizes || (lhs.ptr_increment == 0 && lhs.finalization_offset == 0)); +bool SetBufferRegGroup::can_be_in_one_reg_group(const UnifiedLoopInfo::LoopPortInfo& lhs_info, + const UnifiedLoopInfo::LoopPortInfo& rhs_info) { + const auto equal_element_type_sizes = lhs_info.desc.data_size == rhs_info.desc.data_size; + OPENVINO_ASSERT(lhs_info.port.expr_port && rhs_info.port.expr_port, "Expression ports are nullptr!"); + const auto equal_invariant_shape_paths = + MarkInvariantShapePath::getInvariantPortShapePath(*lhs_info.port.expr_port) == + MarkInvariantShapePath::getInvariantPortShapePath(*rhs_info.port.expr_port); + const auto equal_is_incremented = lhs_info.port.is_incremented == rhs_info.port.is_incremented; + return equal_invariant_shape_paths && equal_is_incremented && + (equal_element_type_sizes || !lhs_info.port.is_incremented || (lhs_info.desc.ptr_increment == 0 && lhs_info.desc.finalization_offset == 0)); } -bool SetBufferRegGroup::are_adjacent(const std::pair& lhs, - const std::pair& rhs) { +bool SetBufferRegGroup::are_adjacent(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs) { const auto& lhs_ids = lhs.first->get_loop_ids(); const auto& rhs_ids = rhs.first->get_loop_ids(); const auto equal_loop_ids = lhs_ids == rhs_ids; if (equal_loop_ids) { // Buffers are connected to the same Loop and have the same outer Loops - return !can_be_in_one_group(lhs.second, rhs.second); + return !can_be_in_one_reg_group(lhs.second, rhs.second); } else { // Buffers are connected to the same Loop, but one of Buffers - inside this Loop, another - outside - // Buffers are adjacent if outer Buffer has not zero data shift params + // Buffers are adjacent if outer Buffer has non-zero data shift params if (lhs_ids.size() == rhs_ids.size()) // If the count of outer Loops are equal, it means that outer loops are already different return true; const auto& outer_buffer = lhs_ids.size() < rhs_ids.size() ? 
lhs : rhs; const auto count_outer_loops = std::min(lhs_ids.size(), rhs_ids.size()); const auto are_outer_loops_the_same = lhs_ids.size() != rhs_ids.size() && std::equal(rhs_ids.cbegin(), rhs_ids.cbegin() + count_outer_loops, lhs_ids.cbegin()); - const auto outer_buffer_has_zero_shifts = outer_buffer.second.ptr_increment == 0 && outer_buffer.second.finalization_offset == 0; + const auto outer_buffer_has_zero_shifts = outer_buffer.second.desc.ptr_increment == 0 && outer_buffer.second.desc.finalization_offset == 0; return !(are_outer_loops_the_same && outer_buffer_has_zero_shifts); } } -void SetBufferRegGroup::update_adj_matrix(const std::pair& lhs, - const std::pair& rhs, - const BufferPool& buffers, +void SetBufferRegGroup::update_adj_matrix(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs, const BufferPool& buffers, std::vector& adj) { const auto size = buffers.size(); const auto lhs_idx = get_buffer_idx(lhs.first, buffers); @@ -80,7 +72,8 @@ void SetBufferRegGroup::update_adj_matrix(const std::pair SetBufferRegGroup::create_adjacency_matrix(LinearIR::constExprIt begin, LinearIR::constExprIt end, const BufferPool& pool) { +std::vector SetBufferRegGroup::create_adjacency_matrix(const LoopManagerPtr& loop_manager, LinearIR::constExprIt begin, LinearIR::constExprIt end, + const BufferPool& pool) { // The sync point to check for adjacency is Loop because only in Loop we increment pointers. // So if some Buffers in the one Loop have conflict (cannot be inplace: the different ptr increment and data sizes) // they are called as adjacent @@ -91,10 +84,12 @@ std::vector SetBufferRegGroup::create_adjacency_matrix(LinearIR::constExpr for (auto expr_it = begin; expr_it != end; expr_it++) { const auto &expr = *expr_it; - if (!ov::is_type(expr->get_node())) + const auto& loop_end = ov::as_type_ptr(expr->get_node()); + if (!loop_end) continue; - const auto buffer_loop_neighbours = get_buffer_loop_neighbours(expr); + const auto& loop_info = loop_manager->get_loop_info(loop_end->get_id()); + const auto buffer_loop_neighbours = get_buffer_loop_neighbours(loop_info); const auto buffers_loop_inside = get_buffer_loop_inside(expr_it); for (auto buffer_it = buffer_loop_neighbours.cbegin(); buffer_it != buffer_loop_neighbours.cend(); ++buffer_it) { // If Buffers, that are connected to the same Loop, have not proportionally ptr shift params for this Loop - these Buffers are adjacent @@ -113,47 +108,33 @@ std::vector SetBufferRegGroup::create_adjacency_matrix(LinearIR::constExpr return adj; } -SetBufferRegGroup::BufferMap SetBufferRegGroup::get_buffer_loop_neighbours(const ExpressionPtr& loop_end_expr) { - const auto& loop_end = ov::as_type_ptr(loop_end_expr->get_node()); - const auto input_count = loop_end->get_input_num(); - const auto output_count = loop_end->get_output_num(); - - const auto& ptr_increments = loop_end->get_ptr_increments(); - const auto& finalization_offsets = loop_end->get_finalization_offsets(); - const auto& data_sizes = loop_end->get_element_type_sizes(); - +SetBufferRegGroup::BufferMap SetBufferRegGroup::get_buffer_loop_neighbours(const UnifiedLoopInfoPtr& loop_info) { BufferMap buffer_neighbours; - for (size_t i = 0; i < input_count; ++i) { - const auto& parent_output = loop_end_expr->get_input_port_connector(i)->get_source().get_expr(); + + const auto& loop_inputs = loop_info->get_input_ports_info(); + for (const auto& port_info : loop_inputs) { + const auto& parent_output = port_info.port.expr_port->get_port_connector_ptr()->get_source().get_expr(); if (const auto 
buffer_expr = ov::as_type_ptr(parent_output)) { if (buffer_neighbours.count(buffer_expr) > 0) { - OPENVINO_ASSERT(buffer_neighbours[buffer_expr].ptr_increment == ptr_increments[i] && - buffer_neighbours[buffer_expr].finalization_offset == finalization_offsets[i], + const auto& port_desc = port_info.desc; + OPENVINO_ASSERT(buffer_neighbours[buffer_expr].desc == port_desc, "Invalid data pointer shifts: If Buffer has several consumers, this consumers must have the same shifts or zero"); continue; } - buffer_neighbours[buffer_expr] = { data_sizes[i], ptr_increments[i], finalization_offsets[i] }; + buffer_neighbours[buffer_expr] = port_info; } } - for (size_t i = input_count; i < input_count + output_count; ++i) { - // The consumers of the corresponding Store ops - const auto consumer_inputs = loop_end_expr->get_input_port_connector(i)->get_consumers(); - size_t buffer_count = 0; - size_t loop_count = 0; + + const auto& loop_outputs = loop_info->get_output_ports_info(); + for (const auto& port_info : loop_outputs) { + const auto& consumer_inputs = port_info.port.expr_port->get_port_connector_ptr()->get_consumers(); for (const auto& consumer_input : consumer_inputs) { const auto& child_expr = consumer_input.get_expr(); - if (const auto buffer_expr = ov::as_type_ptr(child_expr)) { - buffer_neighbours[buffer_expr] = { data_sizes[i], ptr_increments[i], finalization_offsets[i] }; - buffer_count++; - } else if (ov::is_type(child_expr->get_node())) { - loop_count++; - } - } - if (buffer_count > 0) { - OPENVINO_ASSERT((buffer_count == 1) && (buffer_count + loop_count == consumer_inputs.size()), - "Loop output must have not more than 1 Buffer"); + if (const auto buffer_expr = ov::as_type_ptr(child_expr)) + buffer_neighbours[buffer_expr] = port_info; } } + return buffer_neighbours; } @@ -164,9 +145,9 @@ SetBufferRegGroup::BufferMap SetBufferRegGroup::get_buffer_loop_inside(const Lin for (auto it = std::reverse_iterator(loop_end_it); (*it)->get_node() != loop_begin; ++it) { const auto& inner_expr = *it; if (const auto buffer_expr = ov::as_type_ptr(inner_expr)) { - // Set default zero values since it's not used for adjacency definition in case with Buffers in Loop + // Set default value (zeroes) since it's not used for adjacency definition in case with Buffers in Loop if (inner_buffers.count(buffer_expr) == 0) - inner_buffers[buffer_expr] = { 0, 0, 0 }; + inner_buffers[buffer_expr] = UnifiedLoopInfo::LoopPortInfo(); } } return inner_buffers; @@ -219,6 +200,7 @@ auto SetBufferRegGroup::coloring(BufferPool& buffers, std::vector& adj) -> bool SetBufferRegGroup::run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SetBufferRegGroup") + // Identify Buffers using Graph coloring algorithm. 
BufferPool buffer_pool = linear_ir.get_buffers(); // For the better coloring Buffers should be stored in the order of execution numbers @@ -226,7 +208,7 @@ bool SetBufferRegGroup::run(LinearIR& linear_ir, lowered::LinearIR::constExprIt [](const BufferExpressionPtr& lhs, const BufferExpressionPtr& rhs) { return lhs->get_exec_num() < rhs->get_exec_num(); }); // Creation of Adj matrix - auto adj = create_adjacency_matrix(begin, end, buffer_pool); + auto adj = create_adjacency_matrix(linear_ir.get_loop_manager(), begin, end, buffer_pool); // Graph coloring algorithm const auto color_groups = coloring(buffer_pool, adj); diff --git a/src/common/snippets/src/pass/mha_tokenization.cpp b/src/common/snippets/src/pass/mha_tokenization.cpp index c42eb08b82bd4a..beb465ab3a3fbe 100644 --- a/src/common/snippets/src/pass/mha_tokenization.cpp +++ b/src/common/snippets/src/pass/mha_tokenization.cpp @@ -268,16 +268,11 @@ ov::snippets::pass::TokenizeMHASnippets::TokenizeMHASnippets(const SnippetsToken const auto pattern_rank = matmul0->get_output_partial_shape(0).size(); - const auto ops_count_before_softmax = ordered_ops.size(); auto interm_op = matmul0->get_output_target_inputs(0).begin()->get_node()->shared_from_this(); // Add supported operations which are between MatMul0 and Softmax to ordered_ops if (!update_intermediate_supported_ops(interm_op, ordered_ops, hidden_virtual_ports_count, potential_body_params_count)) return false; - // If before Softmax there is Eltwise ops, there will be one more Buffer - if (ops_count_before_softmax != ordered_ops.size() && interm_op->get_output_partial_shape(0).rbegin()->is_dynamic()) - uniqie_buffer_reg_group_count++; - std::shared_ptr reshape0 = nullptr; if (!tokenize_reshape_around_softmax(interm_op, reshape0, ordered_ops)) return false; @@ -295,10 +290,6 @@ ov::snippets::pass::TokenizeMHASnippets::TokenizeMHASnippets(const SnippetsToken if (axis != rank.get_length() - 1 || interm_op->get_output_target_inputs(0).size() != 1) return false; - // Softmax need one buffer at least - if (interm_op->get_output_partial_shape(0).rbegin()->is_dynamic()) - uniqie_buffer_reg_group_count++; - ordered_ops.push_back(interm_op); interm_op = interm_op->get_output_target_inputs(0).begin()->get_node()->shared_from_this(); @@ -333,7 +324,7 @@ ov::snippets::pass::TokenizeMHASnippets::TokenizeMHASnippets(const SnippetsToken // The Loop will have one Buffer with the same shape both on input and output. 
// Need to check for precision to get if we need one more register for Buffer const auto matmul0_prc = op::Brgemm::get_output_type(matmul0->get_input_element_type(0), matmul0->get_input_element_type(1)); - if (matmul1->get_input_element_type(0).size() != matmul0_prc.size() || matmul1->get_input_partial_shape(0).is_dynamic()) { + if (matmul1->get_input_element_type(0).size() != matmul0_prc.size()) { uniqie_buffer_reg_group_count++; } diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp index 4f38eddc2bde0f..2a85714a792655 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp @@ -16,13 +16,6 @@ namespace tpp { namespace pass { namespace { using ExpressionPort = snippets::lowered::ExpressionPort; -bool is_planar_layout(const std::vector& layout) { - for (size_t i = 0; i < layout.size(); i++) { - if (layout[i] != i) - return false; - } - return true; -} // Note: Buffer is directly connected to the port if it remains in the same loops with the port's expression // Directly connected Buffers store data densely, so strides are defined by subternsor dims // Indirectly connected Buffers (with loops between the expr and Buffer) store data according @@ -81,12 +74,12 @@ size_t get_leading_dim(ExpressionPort port, const snippets::lowered::LoopManager subtensor[idx] = shape[shape.size() - i]; } } - OPENVINO_ASSERT(!full_dim_substituted || is_planar_layout(layout), + OPENVINO_ASSERT(!full_dim_substituted || ov::snippets::utils::is_planar_layout(layout), "Only planar layouts are supported for FULL_DIM substitution"); if (has_directly_connected_buffer(port, loop_mngr)) { shape = port_desc->get_subtensor(); - OPENVINO_ASSERT(is_planar_layout(layout), "Only planar layouts are supported for Buffers"); + OPENVINO_ASSERT(ov::snippets::utils::is_planar_layout(layout), "Only planar layouts are supported for Buffers"); const auto rank_diff = static_cast(layout.size()) - static_cast(shape.size()); if (rank_diff > 0) layout.erase(layout.end() - rank_diff, layout.end()); diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/buffer_allocation.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/buffer_allocation.cpp index 6dad1d4772f531..e31a8bebb95758 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/buffer_allocation.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/buffer_allocation.cpp @@ -36,21 +36,24 @@ using BRGEMM_TYPE = intel_cpu::brgemm_utils::BRGEMM_TYPE; */ typedef std::tuple< - bool, // Optimized pipeline - bool, // With SplitLoops opt - size_t, // Expected Buffer size in bytes - size_t, // Expected unique Buffer reg group count - size_t // Expected unique Buffer cluster count + std::vector, // Input shapes + bool, // Optimized pipeline + bool, // With SplitLoops opt + size_t, // Expected Buffer size in bytes + size_t, // Expected unique Buffer reg group count + size_t // Expected unique Buffer cluster count > BufferAllocationCPUParams; class BufferAllocationCPUTest : public testing::TestWithParam { public: using VectorDims = ov::snippets::VectorDims; static std::string getTestCaseName(testing::TestParamInfo obj) { + std::vector shapes; bool is_optimized, with_split_loops; size_t expected_size, expected_reg_group_count, 
expected_cluster_count; - std::tie(is_optimized, with_split_loops, expected_size, expected_reg_group_count, expected_cluster_count) = obj.param; + std::tie(shapes, is_optimized, with_split_loops, expected_size, expected_reg_group_count, expected_cluster_count) = obj.param; std::ostringstream result; + result << "Shapes=" << ov::test::utils::partialShape2str(shapes) << "_"; result << "Opt=" << ov::test::utils::bool2str(is_optimized) << "_"; result << "Split=" << ov::test::utils::bool2str(with_split_loops) << "_"; result << "ExpBufferSize=" << expected_size << "_"; @@ -61,9 +64,11 @@ class BufferAllocationCPUTest : public testing::TestWithParamGetParam(); + std::vector shapes; + std::tie(shapes, m_is_buffer_optimized, m_with_split_loops, m_expected_size, + m_expected_reg_group_count, m_expected_cluster_count) = this->GetParam(); - const auto body = GetModel(); + const auto body = GetModel(shapes); m_linear_ir = ov::snippets::lowered::LinearIR(body, std::make_shared()); m_linear_ir.set_loop_depth(m_loop_depth); // When Subgraph::control_flow_transformations become public method, @@ -105,7 +110,7 @@ class BufferAllocationCPUTest : public testing::TestWithParam GetModel() const = 0; + virtual std::shared_ptr GetModel(const std::vector& shapes) const = 0; void MarkOp(const std::shared_ptr& node, const std::vector& subtensor) const { for (const auto& input : node->inputs()) @@ -131,20 +136,16 @@ class BufferAllocationCPUTest : public testing::TestWithParam GetModel() const override { + std::shared_ptr GetModel(const std::vector& shapes) const override { const auto subtensor_scalar = std::vector{1}; const auto subtensor_power = std::vector{1, ov::snippets::utils::get_full_dim_value()}; const auto subtensor_full = std::vector(2, ov::snippets::utils::get_full_dim_value()); // Dims are selected in order to have blocking loops by each dim - const size_t m = 1024; - const size_t k = 1024; - const size_t n1 = 128; - const size_t n2 = 256; - - const auto parameter0 = std::make_shared(ov::element::f32, ov::PartialShape({1, 12, m, k})); - const auto parameter1 = std::make_shared(ov::element::f32, ov::PartialShape({1, n1, 12, k})); - const auto parameter2 = std::make_shared(ov::element::f32, ov::PartialShape({1, 12, n1, n2})); + OPENVINO_ASSERT(shapes.size() == 3, "Incorrect count of input shapes"); + const auto parameter0 = std::make_shared(ov::element::f32, shapes[0]); + const auto parameter1 = std::make_shared(ov::element::f32, shapes[1]); + const auto parameter2 = std::make_shared(ov::element::f32, shapes[2]); const auto order = std::vector{0, 2, 3, 1}; const auto load_reshape = std::make_shared(parameter1, 1, 0, order); @@ -186,20 +187,15 @@ class MHAFP32BufferAllocationTest : public BufferAllocationCPUTest { class MHABF16AMXBufferAllocationTest : public BufferAllocationCPUTest { protected: - std::shared_ptr GetModel() const override { + std::shared_ptr GetModel(const std::vector& shapes) const override { const auto subtensor_scalar = std::vector{1}; const auto subtensor_power = std::vector{1, ov::snippets::utils::get_full_dim_value()}; const auto subtensor_full = std::vector(2, ov::snippets::utils::get_full_dim_value()); - // Dims are selected in order to have blocking loops by each dim - const size_t m = 1024; - const size_t k = 1024; - const size_t n1 = 128; - const size_t n2 = 256; - - const auto parameter0 = std::make_shared(ov::element::bf16, ov::PartialShape({1, 12, m, k})); - const auto parameter1 = std::make_shared(ov::element::bf16, ov::PartialShape({1, n1, 12, k})); - const auto parameter2 = 
std::make_shared(ov::element::bf16, ov::PartialShape({1, 12, n1, n2})); + OPENVINO_ASSERT(shapes.size() == 3, "Incorrect count of input shapes"); + const auto parameter0 = std::make_shared(ov::element::bf16, shapes[0]); + const auto parameter1 = std::make_shared(ov::element::bf16, shapes[1]); + const auto parameter2 = std::make_shared(ov::element::bf16, shapes[2]); const auto order = std::vector{0, 2, 3, 1}; const auto load_reshape = std::make_shared(parameter1, 1, 0, order); @@ -264,8 +260,17 @@ TEST_P(MHABF16AMXBufferAllocationTest, BufferAllocationCPU) { namespace BufferAllocationCPUTest_Instances { +std::vector static_shapes = { + { 1, 12, 1024, 1024 }, {1, 128, 12, 1024 }, {1, 12, 128, 256 }, +}; + +std::vector dynamic_shapes = { + { -1, -1, -1, -1 }, { -1, -1, -1, -1 }, { -1, -1, -1, -1 }, +}; + INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWSplit, MHAFP32BufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(false), ::testing::Values(true), ::testing::Values(591360), // Each Buffer has own allocated memory @@ -275,6 +280,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWSplit, INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWSplit, MHAFP32BufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(true), ::testing::Values(true), ::testing::Values(573440), // (Buffer before brgemm) + (between brgemms) + (after brgemm) @@ -284,6 +290,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWSplit, MHA INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWOSplit, MHAFP32BufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(false), ::testing::Values(false), ::testing::Values(2622976), // Each Buffer has own allocated memory @@ -293,6 +300,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWOSplit, INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWOSplit, MHAFP32BufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(true), ::testing::Values(false), ::testing::Values(1572864), // (between brgemms) + (Buffer before brgemm0 and after brgemm1) @@ -302,6 +310,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWOSplit, MH INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXNotOptimizedWSplit, MHABF16AMXBufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(false), ::testing::Values(true), ::testing::Values(713984), @@ -311,15 +320,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXNotOptimizedW INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXOptimizedWSplit, MHABF16AMXBufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(true), ::testing::Values(true), ::testing::Values(524288), ::testing::Values(3), - ::testing::Values(8)), + ::testing::Values(7)), BufferAllocationCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXNotOptimizedWOSplit, MHABF16AMXBufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(false), ::testing::Values(false), ::testing::Values(2491648), @@ -329,11 +340,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXNotOptimizedW 
INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXOptimizedWOSplit, MHABF16AMXBufferAllocationTest, ::testing::Combine( + ::testing::Values(static_shapes), ::testing::Values(true), ::testing::Values(false), - ::testing::Values(1409024), + ::testing::Values(1671168), + ::testing::Values(3), + ::testing::Values(7)), + BufferAllocationCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWSplit_Dynamic, MHAFP32BufferAllocationTest, + ::testing::Combine( + ::testing::Values(dynamic_shapes), + ::testing::Values(true), + ::testing::Values(true), + ::testing::Values(0), // no static clusters + ::testing::Values(2), // (Buffer before brgemm0 and after brgemm1) + (between brgemms) + ::testing::Values(3)), // (Buffer before brgemm0) + (between brgemms) + (after brgemm1) + BufferAllocationCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHABF16AMXOptimizedWSplit_Dynamic, MHABF16AMXBufferAllocationTest, + ::testing::Combine( + ::testing::Values(dynamic_shapes), + ::testing::Values(true), + ::testing::Values(true), + ::testing::Values(32768), // only WSP buffers ::testing::Values(3), - ::testing::Values(8)), + ::testing::Values(7)), BufferAllocationCPUTest::getTestCaseName); } // namespace BufferAllocationCPUTest_Instances From 696bddee349dd584996f73e81017cf9c2515f2c9 Mon Sep 17 00:00:00 2001 From: Bo Liu Date: Thu, 7 Nov 2024 19:11:18 +0800 Subject: [PATCH 016/182] [CPU] Roll back deconv 3rd rank layout order for non AMX (#27446) ### Details: - *temporal WA limiting Deconv acb Layout to avx512_core_amx_fp16 platform before oneDNN fix the performance regression issue of related primitive creation* ### Tickets: - *CVS-156640* --- src/plugins/intel_cpu/src/nodes/deconv.cpp | 44 ++++++++++++++-------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index cb340afc029304..2ee858e730c900 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -418,24 +418,38 @@ std::pair Deconvolution::makeDummyInOutShape() { return {inShape.getStaticDims(), outShape.getStaticDims()}; } -std::vector Deconvolution::getAvailableFormatsForDims(const Shape &dims) const { - if (dims.getRank() == 0) +std::vector Deconvolution::getAvailableFormatsForDims(const Shape& dims) const { + if (dims.getRank() == 0) { return {memory::format_tag::x}; - else if (dims.getRank() == 1) + } else if (dims.getRank() == 1) { return {memory::format_tag::x}; - else if (dims.getRank() == 2) + } else if (dims.getRank() == 2) { return {memory::format_tag::nc}; - else if (dims.getRank() == 3) - return {memory::format_tag::ncw, - memory::format_tag::nCw8c, - memory::format_tag::nCw16c, - memory::format_tag::nwc}; - else if (dims.getRank() == 4) - return {memory::format_tag::nchw, memory::format_tag::nChw8c, - memory::format_tag::nChw16c, memory::format_tag::nhwc }; - else if (dims.getRank() == 5) - return {memory::format_tag::ncdhw, memory::format_tag::nCdhw8c, - memory::format_tag::nCdhw16c, dnnl::memory::format_tag::ndhwc }; + } else if (dims.getRank() == 3) { + // Ticket 156640 + if (impl::cpu::x64::mayiuse(impl::cpu::x64::avx512_core_amx_fp16)) { + return {memory::format_tag::ncw, + memory::format_tag::nCw8c, + memory::format_tag::nCw16c, + memory::format_tag::nwc}; + } else { + return {memory::format_tag::tnc, + memory::format_tag::ntc, + memory::format_tag::ncw, + memory::format_tag::nCw8c, + 
memory::format_tag::nCw16c}; + } + } else if (dims.getRank() == 4) { + return {memory::format_tag::nchw, + memory::format_tag::nChw8c, + memory::format_tag::nChw16c, + memory::format_tag::nhwc}; + } else if (dims.getRank() == 5) { + return {memory::format_tag::ncdhw, + memory::format_tag::nCdhw8c, + memory::format_tag::nCdhw16c, + dnnl::memory::format_tag::ndhwc}; + } return {memory::format_tag::any}; } From a9ddd47ac02dea96b06bf364e2a5a22078a681ba Mon Sep 17 00:00:00 2001 From: Denis Orlov Date: Thu, 7 Nov 2024 13:45:19 +0000 Subject: [PATCH 017/182] [GHA] Increase timeout for PyTorch Models tests in precommit (#27449) ### Tickets: - 155619 --- .github/workflows/job_pytorch_models_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/job_pytorch_models_tests.yml b/.github/workflows/job_pytorch_models_tests.yml index 2959728e39dee7..d52b819981d821 100644 --- a/.github/workflows/job_pytorch_models_tests.yml +++ b/.github/workflows/job_pytorch_models_tests.yml @@ -22,7 +22,7 @@ permissions: read-all jobs: PyTorch_Models_Tests: name: PyTorch Models tests - timeout-minutes: ${{ inputs.model_scope == 'precommit' && 40 || 400 }} + timeout-minutes: ${{ inputs.model_scope == 'precommit' && 45 || 400 }} runs-on: ${{ inputs.runner }} container: ${{ fromJSON(inputs.container) }} defaults: From 98f6ea5fdea0416f17e319e6291029c2d59af14a Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Thu, 7 Nov 2024 14:58:01 +0100 Subject: [PATCH 018/182] [GHA][Coverity] Disable sccache (#27450) ### Details: - sccache doesn't properly work with Coverity ### Tickets: - *ticket-id* --- .github/workflows/coverity.yml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index e26bd66e097e33..5a08ec084dadac 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -85,7 +85,6 @@ jobs: image: ${{ fromJSON(needs.docker.outputs.images).ov_build.ubuntu_20_04_x64 }} volumes: - /mount:/mount - options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input CMAKE_BUILD_TYPE: 'Release' @@ -95,14 +94,6 @@ jobs: OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib BUILD_DIR: /__w/openvino/openvino/openvino_build COVERITY_TOOL_DIR: /__w/openvino/openvino/coverity_tool - CMAKE_CXX_COMPILER_LAUNCHER: sccache - CMAKE_C_COMPILER_LAUNCHER: sccache - SCCACHE_IGNORE_SERVER_IO_ERROR: 1 - SCCACHE_SERVER_PORT: 35555 - SCCACHE_ERROR_LOG: /__w/openvino/sccache_log.txt - SCCACHE_LOG: warn - SCCACHE_AZURE_KEY_PREFIX: coverity_lin - SCCACHE_CACHE_SIZE: 50G steps: - name: Clone OpenVINO @@ -141,8 +132,6 @@ jobs: # # Build # - - name: Clean sccache stats - run: ${SCCACHE_PATH} --zero-stats - name: CMake configure - OpenVINO run: | @@ -165,9 +154,6 @@ jobs: - name: Cmake build - OpenVINO with Coverity run: ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int cmake --build ${BUILD_DIR} --parallel $(nproc) --config ${{ env.CMAKE_BUILD_TYPE }} - - - name: Show sccache stats - run: ${SCCACHE_PATH} --show-stats - name: Pack Artefacts run: tar -cvf - cov-int | pigz > openvino.tgz From 5c834609fcf9d7d5bf3ce67208dccd5588e03df6 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Thu, 7 Nov 2024 16:01:39 +0100 Subject: [PATCH 019/182] [PT FE] Inherit signature from forward while patching (#27413) ### Details: - *Inherit signature from forward while patching* ### Tickets: - *ticket-id* --------- 
Signed-off-by: Maxim Vafin --- .../python/src/openvino/frontend/pytorch/patch_model.py | 3 +++ tests/layer_tests/py_frontend_tests/test_torch_frontend.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py b/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py index fb8f70e2a566bc..0d1c4803e3115f 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/patch_model.py @@ -4,6 +4,7 @@ # flake8: noqa # mypy: ignore-errors +import functools import logging import torch from openvino.frontend.pytorch import ModuleExtension @@ -70,6 +71,8 @@ def new_forward(*args, **kwargs): Trampoline.stashed_kwargs = kwargs return extension.convert(m, Trampoline.apply, *args, **kwargs) + # make signature of new_forward same as of forward + new_forward = functools.wraps(m.forward)(new_forward) setattr(m, orig_forward_name, m.forward) m.forward = new_forward diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index faee72bb5d938a..b659c1735d8453 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -687,6 +687,7 @@ def test_patched_16bit_model_converts(): from openvino.frontend.pytorch import patch_model from openvino import convert_model, compile_model import copy + import inspect from transformers.pytorch_utils import Conv1D class ModelWithLinear(torch.nn.Module): @@ -716,6 +717,9 @@ def forward(self, x1, x2): model_fp16 = copy.deepcopy(model_ref).half() patch_model.__make_16bit_traceable(model_fp16) + # verify torch.nn.Linear signature after patching + signature = inspect.signature(model_ref.branch1[0].forward).parameters + assert ["input"] == list(signature) # the approach with patching only works for node with no grad with torch.no_grad(): converted_model = convert_model(model_fp16, example_input=example) From 8edb1501d3472944f131df2ecb8d60246c7dac6a Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Thu, 7 Nov 2024 15:29:49 +0000 Subject: [PATCH 020/182] Sns enable lp kn blocking (#26957) ### Details: - *Perform weights repacking outside of the blocking cycles* - *Functionally enable blocking for I8,U8 and BF16* - *The blocking will be temporary disabled until blocking heuristic is updated in 156014* ### Tickets: - *154729* --- src/common/snippets/src/lowered/loop_port.cpp | 6 +- .../snippets/cpu_runtime_configurator.cpp | 65 ++++++++++- .../snippets/cpu_runtime_configurator.hpp | 13 +++ .../snippets/x64/jit_kernel_emitter.cpp | 10 +- .../snippets/x64/kernel_executors/brgemm.cpp | 12 +- .../x64/kernel_executors/brgemm_copy_b.cpp | 2 +- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 9 +- .../snippets/x64/op/brgemm_utils.cpp | 5 - .../snippets/x64/op/brgemm_utils.hpp | 12 +- .../adjust_brgemm_copy_b_loop_ports.cpp | 107 ++++++++++++++++++ .../adjust_brgemm_copy_b_loop_ports.hpp | 37 ++++++ .../x64/pass/lowered/brgemm_cpu_blocking.cpp | 83 ++++++-------- .../x64/pass/lowered/brgemm_cpu_blocking.hpp | 4 - .../x64/lowered/brgemm_blocking.cpp | 89 +++++++-------- 14 files changed, 329 insertions(+), 125 deletions(-) create mode 100644 src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp create mode 100644 src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp diff --git 
a/src/common/snippets/src/lowered/loop_port.cpp b/src/common/snippets/src/lowered/loop_port.cpp index 990b2801beccb8..52f59bb1fa4d35 100644 --- a/src/common/snippets/src/lowered/loop_port.cpp +++ b/src/common/snippets/src/lowered/loop_port.cpp @@ -30,7 +30,7 @@ std::shared_ptr LoopPort::clone_with_new_expr(const ExpressionPtr& new bool operator==(const LoopPort& lhs, const LoopPort& rhs) { if (&lhs == &rhs) return true; - return lhs.expr_port == rhs.expr_port && lhs.is_incremented == rhs.is_incremented && lhs.dim_idx == rhs.dim_idx; + return *lhs.expr_port == *rhs.expr_port && lhs.is_incremented == rhs.is_incremented && lhs.dim_idx == rhs.dim_idx; } bool operator!=(const LoopPort& lhs, const LoopPort& rhs) { @@ -38,8 +38,8 @@ bool operator!=(const LoopPort& lhs, const LoopPort& rhs) { } bool operator<(const LoopPort& lhs, const LoopPort& rhs) { - return (lhs.expr_port < rhs.expr_port) || - (lhs.expr_port == rhs.expr_port && + return (*lhs.expr_port < *rhs.expr_port) || + (*lhs.expr_port == *rhs.expr_port && (lhs.is_incremented < rhs.is_incremented || (lhs.is_incremented == rhs.is_incremented && lhs.dim_idx < rhs.dim_idx))); } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.cpp index 56c2c75dae9bc2..1c3d283ab673b1 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.cpp @@ -7,6 +7,9 @@ #include "snippets/lowered/loop_manager.hpp" #include "snippets/utils/utils.hpp" +#ifndef OPENVINO_ARCH_ARM64 +#include "transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp" +#endif namespace ov { namespace intel_cpu { @@ -36,10 +39,36 @@ std::string CPURuntimeConfig::to_string() const { CPURuntimeConfigurator::CPURuntimeConfigurator() : ov::snippets::RuntimeConfigurator(std::make_shared()) { } +void CPURuntimeConfigurator::initialization(const ov::snippets::lowered::LinearIRCPtr& linear_ir) { + RuntimeConfigurator::initialization(linear_ir); + if (linear_ir->is_dynamic()) { + loopPortsAdjuster = BrgemmCopyBLoopPortsAdjuster(linear_ir); + } +} + void CPURuntimeConfigurator::update(const ov::snippets::lowered::LinearIRCPtr& linear_ir) { - RuntimeConfigurator::update(linear_ir); + m_config->master_shape = linear_ir->get_master_shape(); + if (linear_ir->is_dynamic()) { + update_loop_info(linear_ir); + } + + if (!m_optimizer.optimize()) { + // If the optimization was not applied, offsets are updated using shapes from descriptors + auto shapes = extract_shapes(); + update_data_offsets(shapes, extract_layouts()); + m_latest_shapes = std::move(shapes); + } if (linear_ir->is_dynamic()) + loopPortsAdjuster.optimize(); + + // Update KernelExecutor Table should be before `update_buffer_scratchpad_size` + // because `ComputeAllocationSize` depends on subtensors which are updated in the table + get_kernel_executor_table()->update_state(linear_ir); + update_buffer_scratchpad_size(linear_ir); + + if (linear_ir->is_dynamic()) { update_loop_args(linear_ir); + } } void CPURuntimeConfigurator::update_tensor_rank(const ov::snippets::VectorDims& master_shape) { @@ -72,6 +101,40 @@ void CPURuntimeConfigurator::update_loop_args(const ov::snippets::lowered::Linea } } } +#ifdef OPENVINO_ARCH_ARM64 +CPURuntimeConfigurator::BrgemmCopyBLoopPortsAdjuster::BrgemmCopyBLoopPortsAdjuster(const ov::snippets::lowered::LinearIRCPtr& linear_ir) { +} + +void 
CPURuntimeConfigurator::BrgemmCopyBLoopPortsAdjuster::optimize() { +} +#else +CPURuntimeConfigurator::BrgemmCopyBLoopPortsAdjuster::BrgemmCopyBLoopPortsAdjuster(const ov::snippets::lowered::LinearIRCPtr& linear_ir) { + const auto& pass = std::make_shared(); + pass->run(*linear_ir); + const auto& affected_uni_loops = pass->get_affected_loops(); + const auto& loop_map = linear_ir->get_loop_manager()->get_map(); + for (const auto& p : loop_map) { + if (const auto& exp_loop = ov::as_type_ptr(p.second)) { + const auto& uni_loop = exp_loop->get_unified_loop_info(); + if (affected_uni_loops.count(uni_loop)) + m_affected_uni2exp_map[uni_loop].push_back(exp_loop); + } + } +} + +void CPURuntimeConfigurator::BrgemmCopyBLoopPortsAdjuster::optimize() { + for (const auto& p : m_affected_uni2exp_map) { + const auto& uni_loop = p.first; + const auto& exp_loops = p.second; + snippets::RuntimeConfigurator::LoopInfoRuntimeParamsMap initialized_info; + if (intel_cpu::pass::AdjustBrgemmCopyBLoopPorts::update_loop_info(uni_loop)) { + initialized_info[uni_loop] = get_loop_runtime_params(uni_loop); + for (const auto& exp_loop : exp_loops) + update_expanded_loop_info(exp_loop, initialized_info); + } + } +} +#endif } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.hpp b/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.hpp index 00bc676678d189..d8ef9772e813ff 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/cpu_runtime_configurator.hpp @@ -44,6 +44,7 @@ class CPURuntimeConfigurator : public ov::snippets::RuntimeConfigurator { * @param linear_ir LinearIR */ void init_tensor_rank(const ov::snippets::lowered::LinearIRCPtr& linear_ir) const override; + void initialization(const ov::snippets::lowered::LinearIRCPtr& linear_ir) override; /** * @brief Calculate Loop parameters of Loop emitters and update these values in CPURuntimeConfig * @param linear_ir LinearIR @@ -51,6 +52,18 @@ class CPURuntimeConfigurator : public ov::snippets::RuntimeConfigurator { void update_loop_args(const ov::snippets::lowered::LinearIRCPtr& linear_ir) const; static const size_t rank6D; + + class BrgemmCopyBLoopPortsAdjuster { + public: + BrgemmCopyBLoopPortsAdjuster() = default; + BrgemmCopyBLoopPortsAdjuster(const ov::snippets::lowered::LinearIRCPtr& linear_ir); + + void optimize(); + + private: + std::unordered_map> m_affected_uni2exp_map; + } loopPortsAdjuster; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp index 4a27c8e17150e8..476123355abe70 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp @@ -180,16 +180,16 @@ void jit_kernel_static_emitter::init_data_pointers(const std::vectormov(data_ptr_regs[i], h->ptr[reg_runtime_params + GET_OFF(dst_ptrs) + (i - num_inputs) * sizeof(void*)]); init_ptr_with_offset(data_ptr_regs[i], data_offsets[i], reg_tmp); } - // a rare case when num_params is maximal, so we have no spare gprs - // * Static case: we can use reg_runtime_params as the last reg_tmp for the last iteration (and corrupt it), since - // it won't be used anymore - // * Dynamic case: we will need reg_runtime_params to pass runtime args to LoopScheduler, so we have to - // push a reg on the stack, and restore it value 
afterward + // A rare case when num_params is maximal, so we have no spare gprs + // Note that we need to push-pop runtime params because some kernels might need them even in the static case + // (e.g. brgemm emitter for amx tile configuration access) if (last_iter_explicitly) { + h->push(reg_runtime_params); h->mov(data_ptr_regs[i], h->ptr[reg_runtime_params + GET_OFF(dst_ptrs) + (i - num_inputs) * sizeof(void*)]); reg_tmp = reg_runtime_params; // can corrupt reg_runtime_params, since we won't use it anymore init_ptr_with_offset(data_ptr_regs[i], data_offsets[i], reg_tmp); + h->pop(reg_runtime_params); } } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp index 6f1f4ab93aeda9..fad1be5a5d1289 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp @@ -246,7 +246,7 @@ void BrgemmKernelExecutor::update_config(const ov::snippets::lowered::Expression // Note: We check `is_incremented` attribute only for not incremented ports because // this `is_incremented = true` can be changed by `CleanRepeatedDataPointerShifts` optimization auto check_port = [&](const ov::snippets::lowered::LoopPort& p) { return p.dim_idx == 0; }; - OPENVINO_ASSERT(in_ports.size() == 2 && !in_ports.front().is_incremented && std::all_of(in_ports.cbegin(), in_ports.cend(), check_port) && + OPENVINO_ASSERT(in_ports.size() >= 2 && !in_ports.front().is_incremented && std::all_of(in_ports.cbegin(), in_ports.cend(), check_port) && out_ports.size() == 1 && check_port(out_ports.back()), "Incorrect Loop by Brgemm dimension N"); N = current_expanded_loop_info->get_increment(); @@ -269,7 +269,7 @@ void BrgemmKernelExecutor::update_config(const ov::snippets::lowered::Expression // Quick validation check: Should we check that port is really Brgemm port? 
// Note: We check `is_incremented` attribute only for not incremented ports because // this `is_incremented = true` can be changed by `CleanRepeatedDataPointerShifts` optimization - OPENVINO_ASSERT(in_ports.size() == 2 && in_ports.front().dim_idx == 0 && in_ports.back().dim_idx == 1 && + OPENVINO_ASSERT(in_ports.size() >= 2 && in_ports.front().dim_idx == 0 && in_ports.back().dim_idx == 1 && out_ports.size() == 1 && !out_ports.front().is_incremented, "Incorrect Loop by Brgemm dimension K"); K = current_expanded_loop_info->get_increment(); @@ -286,7 +286,7 @@ void BrgemmKernelExecutor::update_config(const ov::snippets::lowered::Expression OV_CPU_JIT_EMITTER_ASSERT(brgemm_node, "Got invalid node type in update_config"); // In case of data repacking LDB is chosen in accordance with repacking buffer size if (with_repacking(brgemm_node->get_type())) - LDB = brgemm_utils::repacking::compute_out_leading_dim(N, brgemm_node->get_input_element_type(1)); + LDB = brgemm_utils::repacking::compute_LDB(LDB, brgemm_node->get_input_element_type(1)); config.update(DIM_CAST(M), DIM_CAST(N), DIM_CAST(K), LDA, LDB, LDC, beta); } @@ -303,6 +303,8 @@ void BrgemmKernelExecutor::execute(const BrgemmKernelExecutor* executor, call_ar } cpu::x64::brgemm_kernel_params_t brgemm_p; + // Note: compensations should be applied only once, so we do it only on the first iteration, when beta == 0 + size_t is_with_comp = config.get_beta() == 0 && config.is_with_comp(); brgemm_p.batch = nullptr; // default value brgemm_p.ptr_A = args->A; @@ -311,8 +313,8 @@ void BrgemmKernelExecutor::execute(const BrgemmKernelExecutor* executor, call_ar brgemm_p.ptr_D = args->C; brgemm_p.ptr_buf = args->scratch; brgemm_p.ptr_bias = nullptr; - brgemm_p.do_post_ops = static_cast(config.is_with_comp()); - brgemm_p.do_apply_comp = static_cast(config.is_with_comp()); + brgemm_p.do_post_ops = is_with_comp; + brgemm_p.do_apply_comp = is_with_comp; brgemm_p.skip_accm = 0; brgemm_p.BS = 1; // default value OV_CPU_JIT_EMITTER_ASSERT(kernel->compiled_kernel, "has nullptr kernel"); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp index 17f8923ae9867b..cc79458c7c4c64 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp @@ -305,7 +305,7 @@ void BrgemmCopyBKernelExecutor::update_config(const ov::snippets::lowered::Expre init(N_dim, N_blk, 0); const auto& brg_weight_etype = expr->get_node()->get_input_element_type(0); - const auto LDB = brgemm_utils::repacking::compute_out_leading_dim(N_dim, brg_weight_etype); + const auto LDB = brgemm_utils::repacking::compute_LDB(N_dim, brg_weight_etype); const auto copy_B_wei_stride = ov::snippets::utils::get_dim_stride(expr->get_input_port(0), config.is_transposed_B() ? 
0 : 1) * brg_weight_etype.size(); config.update(N_dim, N_blk, K_dim, K_blk, copy_B_wei_stride, LDB); diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index 05a1e5a958771b..ee24dd66493204 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -17,11 +17,10 @@ #include "snippets/pass/positioned_pass.hpp" #include "snippets/pass/canonicalization.hpp" #include "snippets/pass/analyze_broadcastable_inputs.hpp" -#include "snippets/lowered/linear_ir.hpp" -#include "snippets/lowered/pass/optimize_domain.hpp" #include "snippets/lowered/pass/insert_loops.hpp" #include "snippets/lowered/pass/mark_loops.hpp" #include "snippets/lowered/pass/insert_buffers.hpp" +#include "snippets/lowered/pass/init_loops.hpp" #include "transformations/defs.hpp" #include "transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp" #include "transformations/snippets/common/pass/mul_add_to_fma.hpp" @@ -38,6 +37,7 @@ #include "transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp" #include "transformations/snippets/x64/pass/enforce_precision.hpp" #include "transformations/snippets/x64/shape_inference.hpp" +#include "transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp" #endif #include "utils/cpu_utils.hpp" @@ -54,6 +54,7 @@ std::mutex err_print_lock; #endif #ifdef SNIPPETS_LIBXSMM_TPP +#include "snippets/lowered/pass/optimize_domain.hpp" #include "transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp" #include "transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp" #include "transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp" @@ -683,6 +684,10 @@ Subgraph::ControlFlowPasses Subgraph::getControlFlowPasses() const { SNIPPETS_REGISTER_PASS_RELATIVE(Place::After, ov::snippets::lowered::pass::MarkLoops, ov::intel_cpu::pass::BrgemmCPUBlocking); + + SNIPPETS_REGISTER_PASS_RELATIVE(Place::After, ov::snippets::lowered::pass::InitLoops, + ov::intel_cpu::pass::AdjustBrgemmCopyBLoopPorts); + SNIPPETS_REGISTER_PASS_RELATIVE(Place::After, ov::snippets::lowered::pass::InsertLoops, ov::intel_cpu::pass::FuseLoadStoreConvert); SNIPPETS_REGISTER_PASS_RELATIVE(Place::Before, ov::snippets::lowered::pass::InsertBuffers, diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp index 844ec338b8a83b..adc215ef1d9900 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp @@ -6,7 +6,6 @@ #include "dnnl_extension_utils.h" #include "emitters/utils.hpp" -#include "snippets/utils/utils.hpp" #include "transformations/snippets/x64/op/brgemm_copy_b.hpp" #include "utils/general_utils.h" @@ -76,10 +75,6 @@ size_t get_elems_in_vec(const ov::element::Type& precision) { } namespace repacking { -size_t compute_out_leading_dim(const size_t n_block, const ov::element::Type& precision) { - return std::max(n_block, compute_inner_n_block(precision)); -} - size_t compute_inner_n_block(const ov::element::Type& precision) { switch (precision) { case element::i8: return 64; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.hpp index bc627c59920c4b..aeb5b22cd56129 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.hpp +++ 
b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.hpp @@ -8,6 +8,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/core/dimension.hpp" #include "snippets/lowered/expression.hpp" +#include "snippets/utils/utils.hpp" namespace ov { namespace intel_cpu { @@ -42,14 +43,19 @@ size_t compute_vnni_factor(const ov::element::Type& precision); size_t get_elems_in_vec(const ov::element::Type& precision); namespace repacking { +/// \brief Computes inner N block size used by OneDNN implementation. Depends on tensor precision +size_t compute_inner_n_block(const ov::element::Type& precision); /** * @brief Computes leading dimension (LDB) which must be used in brgemm and brgemm_copy_b emitters * @param n_block N block size shared between BrgemmCPU and BrgemmCopyB node * @param precision tensor precision */ -size_t compute_out_leading_dim(const size_t n_block, const ov::element::Type& precision); -/// \brief Computes inner N block size used by OneDNN implementation. Depends on tensor precision -size_t compute_inner_n_block(const ov::element::Type& precision); +template::value || std::is_same::value), bool>::type> +T compute_LDB(T n_block, const ov::element::Type& precision) { + return snippets::utils::is_dynamic_value(n_block) ? + n_block : + std::max(n_block, static_cast(compute_inner_n_block(precision))); +} } // namespace repacking } // namespace brgemm_utils } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp new file mode 100644 index 00000000000000..c421e5cc2a4805 --- /dev/null +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp @@ -0,0 +1,107 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/itt.hpp" + +#include "adjust_brgemm_copy_b_loop_ports.hpp" +#include "snippets/utils/utils.hpp" +#include "snippets/lowered/loop_manager.hpp" +#include "snippets/lowered/expressions/buffer_expression.hpp" +#include "transformations/snippets/x64/op/brgemm_copy_b.hpp" +#include "transformations/snippets/x64/op/brgemm_cpu.hpp" + +namespace ov { +namespace intel_cpu { + +bool pass::AdjustBrgemmCopyBLoopPorts::update_loop_info(const std::shared_ptr& loop_info) { + OPENVINO_ASSERT(loop_info, "Invalid loop info pointer"); + bool modified = false; + auto caller = [&](snippets::lowered::LoopPort &loop_port, + snippets::lowered::UnifiedLoopInfo::LoopPortDesc &loop_desc) { + const auto& p = *loop_port.expr_port; + if (p.get_type() == snippets::lowered::ExpressionPort::Input && + p.get_index() == 1) { + const auto& node = p.get_expr()->get_node(); + if (auto brg = as_type_ptr(node)) { + const auto precision = node->get_input_element_type(1); + /* + * The BrgemmCopyB operation repacks the weights in the following way: + * 1) VNNI format is applied: KN4k for I8/U8, or KN2k for BF16 + * 2) Zero padding is applied if N4k < 256 or N2k < 64 + */ + if (brgemm_utils::with_repacking(brg->get_type()) && + precision != element::f32 && loop_port.is_incremented) { + // K blocking loop: account for zero padding + if (loop_port.dim_idx == 1) { + const auto ptr_incr = loop_desc.ptr_increment; + const auto blocked_shape_ptr_inc = brgemm_utils::repacking::compute_LDB(ptr_incr, precision); + if (ptr_incr != 0 && ptr_incr != blocked_shape_ptr_inc) { + loop_desc.ptr_increment = 
blocked_shape_ptr_inc; + OPENVINO_ASSERT(loop_desc.finalization_offset % ptr_incr == 0, + "Can't rescale finalization offsets"); + loop_desc.finalization_offset = loop_desc.ptr_increment * + (loop_desc.finalization_offset / ptr_incr); + } + // N blocking loop: account for the VNNI format + } else if (loop_port.dim_idx == 0) { + auto k_blk_size = static_cast(brgemm_utils::compute_vnni_factor(precision)); + loop_desc.ptr_increment = snippets::utils::dynamic_safe_mul(loop_desc.ptr_increment, k_blk_size); + loop_desc.finalization_offset = snippets::utils::dynamic_safe_mul(loop_desc.finalization_offset, k_blk_size); + } else { + OPENVINO_THROW("Unexpected loop port dimension index in AdjustBrgemmCopyBLoopPorts"); + } + modified = true; + } + } + } + }; + loop_info->iterate_through_infos(caller); + return modified; +} + +bool pass::AdjustBrgemmCopyBLoopPorts::run(const snippets::lowered::LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::AdjustBrgemmCopyBLoopPorts") + + bool modified = false; + + for (const auto& expr : linear_ir) { + const auto& node = expr->get_node(); + if (!is_type(node)) + continue; + const auto& repacking_loop_ids = expr->get_loop_ids(); + const auto& child_ports = expr->get_output_port(0).get_connected_ports(); + OPENVINO_ASSERT(child_ports.size() == 1 && + is_type(child_ports.begin()->get_expr()), + "BrgemmCopyB should have one BufferExpression child"); + auto grandchild_ports = child_ports.begin()->get_expr()->get_output_port(0).get_connected_ports(); + for (const auto& target_port : grandchild_ports) { + const auto& port_node = target_port.get_expr()->get_node(); + if (!is_type(port_node)) { + OPENVINO_ASSERT(is_type(port_node), + "Invalid grandchild of BrgemmCopyB"); + continue; + } + const auto &brgemm_loop_ids = target_port.get_expr()->get_loop_ids(); + // Continue if there is no blocking loop + if (brgemm_loop_ids.empty() && repacking_loop_ids.empty()) + continue; + OPENVINO_ASSERT(brgemm_loop_ids.size() > repacking_loop_ids.size(), "Invalid BrgemmCopyB loop configuration"); + const auto &loop_manager = linear_ir.get_loop_manager(); + for (auto i = repacking_loop_ids.size(); i < brgemm_loop_ids.size(); i++) { + const auto &loop = loop_manager->get_loop_info(brgemm_loop_ids[i]); + auto uni_loop = ov::as_type_ptr(loop); + if (!uni_loop) + uni_loop = ov::as_type_ptr(loop)->get_unified_loop_info(); + if (!m_affected_loops.count(uni_loop) && update_loop_info(uni_loop)) { + m_affected_loops.insert(uni_loop); + modified = true; + } + } + } + } + + return modified; +} +} // namespace intel_cpu +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp new file mode 100644 index 00000000000000..5c65c7a0282823 --- /dev/null +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "snippets/lowered/pass/pass.hpp" +#include "snippets/lowered/loop_info.hpp" + +namespace ov { +namespace intel_cpu { +namespace pass { + +/** + * @interface AdjustBrgemmCopyBLoopPorts + * @brief BrgemmCopyB is located outside of blocking loops and repacks input data into a blocked layout. 
+ * This layout should be accounted for when we increment BrgemmCopyB data pointers. This pass + * Finds loop ports connected to BrgemmCopyB and sets appropriate pointer increments. + * @ingroup snippets + */ +class AdjustBrgemmCopyBLoopPorts: public snippets::lowered::pass::Pass { +public: + AdjustBrgemmCopyBLoopPorts() = default; + OPENVINO_RTTI("AdjustBrgemmCopyBLoopPorts", "Pass"); + bool run(const snippets::lowered::LinearIR& linear_ir); + bool run(snippets::lowered::LinearIR& linear_ir) override { + return run(const_cast(linear_ir)); + } + static bool update_loop_info(const snippets::lowered::UnifiedLoopInfoPtr& uni_loop_info); + const std::unordered_set& get_affected_loops() { return m_affected_loops; } +private: + std::unordered_set m_affected_loops; +}; + +} // namespace pass +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.cpp index 51565537c43568..9b3009284e09e8 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.cpp @@ -44,18 +44,6 @@ LinearIR::constExprIt BrgemmCPUBlocking::move_new_memory_buffer(LinearIR& linear return std::prev(brgemm_it); } -LinearIR::constExprIt BrgemmCPUBlocking::get_loop_begin_pos(LinearIR& linear_ir, const LinearIR::constExprIt& brgemm_it, const ExpressionPtr& copy_b_expr) { - auto loop_begin_it = brgemm_it; - const auto& brgemm_expr = *brgemm_it; - const auto brgemm = ov::as_type_ptr(brgemm_expr->get_node()); - OPENVINO_ASSERT(brgemm, "get_loop_begin_pos must be called only for BrgemmCPU expression"); - if (with_amx(brgemm->get_type())) - loop_begin_it = move_new_memory_buffer(linear_ir, brgemm_it); - if (copy_b_expr) - loop_begin_it = linear_ir.find(copy_b_expr); - return loop_begin_it; -} - size_t BrgemmCPUBlocking::get_default_n_blk(size_t n) const { return dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) ? 
64 : 24; } @@ -66,6 +54,8 @@ std::tuple BrgemmCPUBlocking::get_blocking_params(const size_t m_blk, n_blk, k_blk; std::tie(m_blk, n_blk, k_blk) = BrgemmBlockingBase::get_blocking_params(brgemm_expr); + // Note: K,N blocking is functionally enabled, need to turn it on after blocking heuristic is updated to cover + // the low precision cases (ticket: 156014) if (with_repacking(brgemm->get_type())) { n_blk = get_full_dim_value(); k_blk = get_full_dim_value(); @@ -88,47 +78,48 @@ bool BrgemmCPUBlocking::mark_blocking_loops(LinearIR& linear_ir, const auto brgemm = ov::as_type_ptr(brgemm_expr->get_node()); const auto type = brgemm->get_type(); - if (stand_alone(type)) - return ov::snippets::lowered::pass::BrgemmBlockingBase::mark_blocking_loops(linear_ir, brgemm_it, m_block, n_block, k_block); + auto res = ov::snippets::lowered::pass::BrgemmBlockingBase::mark_blocking_loops(linear_ir, brgemm_it, m_block, n_block, k_block); - brgemm_expr->get_input_port_descriptor(0)->set_subtensor({m_block, k_block}); - brgemm_expr->get_input_port_descriptor(1)->set_subtensor({k_block, n_block}); - brgemm_expr->get_output_port_descriptor(0)->set_subtensor({m_block, n_block}); + if (stand_alone(type)) + return res; const auto copy_b_expr = linear_ir.get_expr_by_node(brgemm->get_brgemm_copy()); - copy_b_expr->get_input_port_descriptor(0)->set_subtensor({k_block, n_block}); - copy_b_expr->get_output_port_descriptor(0)->set_subtensor({k_block, n_block}); + const ov::snippets::VectorDims full_subtensor(2, get_full_dim_value()); + copy_b_expr->get_input_port_descriptor(0)->set_subtensor(full_subtensor); + copy_b_expr->get_output_port_descriptor(0)->set_subtensor(full_subtensor); + + if (with_amx(type)) { + move_new_memory_buffer(linear_ir, brgemm_it); + auto buffer_it = std::prev(brgemm_it); + buffer_it->get()->set_loop_ids(brgemm_expr->get_loop_ids()); + } + + const auto& loop_manager = linear_ir.get_loop_manager(); if (with_compensations(type)) { - const ov::snippets::VectorDims compensations_subtensor{1, n_block}; + const ov::snippets::VectorDims compensations_subtensor{1, get_full_dim_value()}; OPENVINO_ASSERT(brgemm_expr->get_input_count() == 3, "Brgemm must have 3 inputs in case of compensations."); - brgemm_expr->get_input_port_descriptor(2)->set_subtensor(compensations_subtensor); + const auto& compens_port = brgemm_expr->get_input_port(2); + compens_port.get_descriptor_ptr()->set_subtensor(compensations_subtensor); copy_b_expr->get_output_port_descriptor(1)->set_subtensor(compensations_subtensor); - } - const auto& loop_manager = linear_ir.get_loop_manager(); - if (!is_full_dim_value(k_block)) { - const auto loop_begin = get_loop_begin_pos(linear_ir, brgemm_it, copy_b_expr); - const std::vector entries{LoopPort(brgemm_expr->get_input_port(0), true, 0), - LoopPort(copy_b_expr->get_input_port(0), true, 1)}; - const std::vector exits{LoopPort(brgemm_expr->get_output_port(0), false)}; - mark_k_blocking(loop_manager, loop_begin, std::next(brgemm_it), entries, exits, k_block); - } - if (!is_full_dim_value(n_block)) { - const auto loop_begin = get_loop_begin_pos(linear_ir, brgemm_it, copy_b_expr); - const std::vector entries{LoopPort(brgemm_expr->get_input_port(0), false), - LoopPort(copy_b_expr->get_input_port(0), true)}; - const std::vector exits{LoopPort(brgemm_expr->get_output_port(0), true)}; - mark_n_blocking(loop_manager, loop_begin, std::next(brgemm_it), entries, exits, n_block); - } - if (!is_full_dim_value(m_block)) { - const bool include_repacking = !is_full_dim_value(k_block) || 
!is_full_dim_value(n_block); - const auto loop_begin = get_loop_begin_pos(linear_ir, brgemm_it, include_repacking ? copy_b_expr : nullptr); - const auto b_input_port = include_repacking ? copy_b_expr->get_input_port(0) : brgemm_expr->get_input_port(1); - std::vector entries{LoopPort(brgemm_expr->get_input_port(0), true), LoopPort(b_input_port, false)}; - if (!include_repacking && with_compensations(type)) - entries.emplace_back(brgemm_expr->get_input_port(2), false); - const std::vector exits{LoopPort(brgemm_expr->get_output_port(0), true)}; - mark_m_blocking(loop_manager, loop_begin, std::next(brgemm_it), entries, exits, m_block); + const auto& loop_ids = brgemm_expr->get_loop_ids(); + size_t i = 0; + LoopInfoPtr loop_info = nullptr; + auto update_loop_info = [&](LoopPort&& new_port) { + OPENVINO_ASSERT(i < loop_ids.size(), "Attempt to access invalid loop id"); + loop_info = loop_manager->get_loop_info(loop_ids[i++]); + const auto& in_ports = loop_info->get_input_ports(); + OPENVINO_ASSERT(in_ports.size() > 1, "Invalid number of input loop ports"); + loop_info->replace_with_new_ports(in_ports[1], {in_ports[1], new_port}); + }; + if (!is_full_dim_value(m_block)) + update_loop_info({compens_port, false, 1}); + + if (!is_full_dim_value(n_block)) + update_loop_info({compens_port, true, 0}); + + if (!is_full_dim_value(k_block)) + update_loop_info({compens_port, false, 1}); } return true; } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp index 22429a6b0c98fb..0fee4e80454f9a 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp @@ -40,10 +40,6 @@ class BrgemmCPUBlocking : public ov::snippets::lowered::pass::BrgemmBlocking get_blocking_params(const ov::snippets::lowered::ExpressionPtr& brgemm_expr) const override; diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/brgemm_blocking.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/brgemm_blocking.cpp index 89f2e06c14a9fa..738afba6a101f9 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/brgemm_blocking.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/lowered/brgemm_blocking.cpp @@ -81,43 +81,6 @@ void create_brgemm_loop_infos(const LinearIRPtr& linear_ir, BrgemmBlockingBase::get_default_blocking_loop_handlers(m, m_block))); } } - -void create_brgemm_with_copy_b_loop_infos(const LinearIRPtr& linear_ir, - const ExpressionPtr& brgemm_expr, - const ExpressionPtr& copy_b_expr, - size_t m = 0, size_t m_blk = 0, - size_t k = 0, size_t k_blk = 0, - size_t n = 0, size_t n_blk = 0) { - const bool k_block = k != 0 && k_blk != 0; - const bool n_block = k != 0 && k_blk != 0; - const bool m_block = m != 0 && m_blk != 0; - if (k_block) { - const auto loop_info = - std::make_shared(k, k_blk, - std::vector{LoopPort(brgemm_expr->get_input_port(0)), - LoopPort(copy_b_expr->get_input_port(0), true, 1)}, - std::vector{LoopPort(brgemm_expr->get_output_port(0), false)}, - get_k_loop_handlers(k, k_block)); - linear_ir->get_loop_manager()->add_loop_info(loop_info); - } - if (n_block) { - linear_ir->get_loop_manager()->add_loop_info( - std::make_shared(n, n_blk, - std::vector{LoopPort(brgemm_expr->get_input_port(0), false), - LoopPort(copy_b_expr->get_input_port(0))}, 
- std::vector{LoopPort(brgemm_expr->get_output_port(0))}, - BrgemmBlockingBase::get_default_blocking_loop_handlers(n, n_block))); - } - if (m_block) { - const auto& second_input_port = k_block || n_block ? copy_b_expr->get_input_port(0) : brgemm_expr->get_input_port(1); - linear_ir->get_loop_manager()->add_loop_info( - std::make_shared(m, m_blk, - std::vector{LoopPort(brgemm_expr->get_input_port(0), true, 1), - LoopPort(second_input_port, false, 1)}, - std::vector{LoopPort(brgemm_expr->get_output_port(0), true, 1)}, - BrgemmBlockingBase::get_default_blocking_loop_handlers(m, m_block))); - } -} } // namespace class BrgemmBlockingTest : public LoweredPassTestsF { @@ -230,6 +193,8 @@ TEST_F(BrgemmCPUBlockingTest, BlockingIsNotNeeded) { } TEST_F(BrgemmCPUBlockingTest, WithDataRepacking) { + // Skipped because K,N blocking is disabled until heuristic is updated (ticket: 156014) + GTEST_SKIP(); const ov::Dimension::value_type m = 384; const ov::Dimension::value_type k = 1024; const ov::Dimension::value_type n = 384; @@ -257,14 +222,16 @@ TEST_F(BrgemmCPUBlockingTest, WithDataRepacking) { auto brgemm = linear_ir_ref->push_node(data_a.second, copy_b.second, BRGEMM_TYPE::REPACKING_ONLY); const auto& brgemm_expr = *brgemm.first; - init_expr_descriptors(brgemm_expr, {{m_blk, full_dim}, {full_dim, full_dim}, {m_blk, full_dim}}); - create_brgemm_with_copy_b_loop_infos(linear_ir_ref, brgemm_expr, copy_b_expr, m, m_blk); - brgemm_expr->set_loop_ids({0}); + init_expr_descriptors(brgemm_expr, {{m_blk, k_blk}, {k_blk, n_blk}, {m_blk, n_blk}}); + create_brgemm_loop_infos(linear_ir_ref, brgemm_expr, m, m_blk, k, k_blk, n, n_blk); + brgemm_expr->set_loop_ids({2, 1, 0}); auto result = linear_ir_ref->push_node(brgemm.second); } } TEST_F(BrgemmCPUBlockingTest, WithCompensations) { + // Skipped because K,N blocking is disabled until heuristic is updated (ticket: 156014) + GTEST_SKIP(); const ov::Dimension::value_type m = 384; const ov::Dimension::value_type k = 1024; const ov::Dimension::value_type n = 384; @@ -292,14 +259,27 @@ TEST_F(BrgemmCPUBlockingTest, WithCompensations) { const auto& copy_b_n = copy_b.second; auto brgemm = linear_ir_ref->push_node(data_a.second, copy_b_n->output(0), copy_b_n->output(1), BRGEMM_TYPE::WITH_COMPENSATIONS); const auto& brgemm_expr = *brgemm.first; - init_expr_descriptors(brgemm_expr, {{m_blk, full_dim}, {full_dim, full_dim}, {1, full_dim}, {m_blk, full_dim}}); - create_brgemm_loop_infos(linear_ir_ref, brgemm_expr, m, m_blk); - brgemm_expr->set_loop_ids({0}); + init_expr_descriptors(brgemm_expr, {{m_blk, k_blk}, {k_blk, n_blk}, {1, full_dim}, {m_blk, n_blk}}); + create_brgemm_loop_infos(linear_ir_ref, brgemm_expr, m, m_blk, k, k_blk, n, n_blk); + + const auto loop_manager = linear_ir_ref->get_loop_manager(); + auto update_loop_info = [&loop_manager](size_t loop_id, LoopPort&& new_port) { + const auto& loop_info = loop_manager->get_loop_info(loop_id); + const auto& in_ports = loop_info->get_input_ports(); + loop_info->replace_with_new_ports(in_ports[1], {in_ports[1], new_port}); + }; + const auto& compens_port = brgemm_expr->get_input_port(2); + update_loop_info(1, {compens_port, true, 0}); + update_loop_info(0, {compens_port, false, 1}); + + brgemm_expr->set_loop_ids({2, 1, 0}); auto result = linear_ir_ref->push_node(brgemm.second); } } TEST_F(BrgemmCPUBlockingTest, AMX) { + // Skipped because K,N blocking is disabled until heuristic is updated (ticket: 156014) + GTEST_SKIP(); const ov::Dimension::value_type m = 384; const ov::Dimension::value_type k = 1024; const 
ov::Dimension::value_type n = 384; @@ -322,16 +302,25 @@ TEST_F(BrgemmCPUBlockingTest, AMX) { auto data_b = linear_ir_ref->push_node(precision, input_shape_b); auto copy_b = linear_ir_ref->push_node(data_b.second, precision, BRGEMM_TYPE::REPACKING_ONLY); const auto copy_b_expr = *copy_b.first; - init_expr_descriptors(copy_b_expr, {{full_dim, full_dim}, {full_dim, full_dim}}); + init_expr_descriptors(copy_b_expr, {{full_dim, full_dim}, + {full_dim, full_dim}}); - auto scratch = linear_ir_ref->push_node(ov::Shape{BrgemmCPU::SCRATCH_BYTE_SIZE}); - scratch.first->get()->set_loop_ids({0}); + auto scratch = linear_ir_ref->push_node(ov::Shape {BrgemmCPU::SCRATCH_BYTE_SIZE}); + scratch.first->get()->set_loop_ids({2, 1, 0}); - auto brgemm = linear_ir_ref->push_node(data_a.second, copy_b.second, scratch.second, BRGEMM_TYPE::WITH_AMX); + auto brgemm = linear_ir_ref->push_node(data_a.second, copy_b.second, scratch.second, + BRGEMM_TYPE::WITH_AMX); const auto& brgemm_expr = *brgemm.first; - init_expr_descriptors(brgemm_expr, {{m_blk, full_dim}, {full_dim, full_dim}, get_default_subtensor(), {m_blk, full_dim}}); - create_brgemm_with_copy_b_loop_infos(linear_ir_ref, brgemm_expr, copy_b_expr, m, m_blk); - brgemm_expr->set_loop_ids({0}); + init_expr_descriptors(brgemm_expr, {{m_blk, k_blk}, {k_blk, n_blk}, get_default_subtensor(), {m_blk, n_blk}}); + create_brgemm_loop_infos(linear_ir_ref, brgemm_expr, m, 0, k, k_blk, n, n_blk); + + std::vector entries {LoopPort(brgemm_expr->get_input_port(0), true, 1), + LoopPort(brgemm_expr->get_input_port(1), false, 1)}; + std::vector exits {LoopPort(brgemm_expr->get_output_port(0), true, 1)}; + auto handlers = BrgemmBlockingBase::get_default_blocking_loop_handlers(m, m_blk); + linear_ir_ref->get_loop_manager()-> + add_loop_info(std::make_shared(m, m_blk, entries, exits, handlers)); + brgemm_expr->set_loop_ids({2, 1, 0}); auto result = linear_ir_ref->push_node(brgemm.second); } } From f33a637d30484ea1404f9648b739e35fb43883cd Mon Sep 17 00:00:00 2001 From: Sergey Ivanov Date: Thu, 7 Nov 2024 07:50:28 -0800 Subject: [PATCH 021/182] Improve SIT to compose a batch tensor as a model input make up from a packet of different images (#27341) ### Details: - A new parameter `override_model_batch_size` was added several PRs ago. The purpose of this parameter was is to take a picture/binary and "stretch" it to fit a batched tensor as an input of a batched model providing that we could run and validate batched models - The current PR enhance this idea and allows us to set distinguished images/binaries in a batched tensor input so that we could validate correctness of processing of different lines of batch during our inference invocations. SIT now uses "|" separator to discern different images inside input images array as an command line argument. The batched images filing algorithm is the following: 1) If passed images amount is not enough to fill up an entire batch, a last image will be propagated and copied to remnant lines of N-batch. 2) If passed images array is larger than an batched tensor, then only N images will be used up and the rest of images will be thrown away Example: ``` single-image-test --device CPU --network resnet50.xml --mode classification --top_k 5 --il NCHW --ol NC --iml NCHW --oml NC --ip FP16 --op FP16 --override_model_batch_size 3 --input "dog.bmp|00000001.JPEG|00000002.JPEG" ``` gives us ``` ... 
Load input #0 from [dog.bmp,00000001.JPEG,00000002.JPEG] as f16 [N,C,H,W] [3,3,224,224] Fill up all input batch slices planes up to 3 with image data from the array: [0/3] Fill input batch slices planes starting from index 1 up to 3 with image data from the array: [1/3] Fill input batch slices planes starting from index 2 up to 3 with image data from the array: [2/3] ``` ### Tickets: - E####145303 --------- Co-authored-by: Maksim Doronin --- .../tools/common/include/tensor_utils.hpp | 8 + .../tools/common/include/tools_helpers.hpp | 16 +- .../tools/common/src/tensor_utils.cpp | 24 +++ .../intel_npu/tools/compile_tool/main.cpp | 18 +- .../tools/single-image-test/main.cpp | 179 ++++++++++++++---- 5 files changed, 197 insertions(+), 48 deletions(-) diff --git a/src/plugins/intel_npu/tools/common/include/tensor_utils.hpp b/src/plugins/intel_npu/tools/common/include/tensor_utils.hpp index 87b2301a7ae4fb..18d431c157d2dd 100644 --- a/src/plugins/intel_npu/tools/common/include/tensor_utils.hpp +++ b/src/plugins/intel_npu/tools/common/include/tensor_utils.hpp @@ -68,5 +68,13 @@ std::vector> parseTensorsAsFP32(const std::map& tensors, const ov::Layout& layout); + +/** + * @brief Split a batched tensor on several non-batched tensors having the same shapes and precisions. + * + * @param tensors The source non-batched tensors + * @return The merged batched tensor + */ +std::list splitBatchedTensor(const ov::Tensor &tensor, const ov::Layout& layout, size_t parts); } // namespace utils } // namespace npu diff --git a/src/plugins/intel_npu/tools/common/include/tools_helpers.hpp b/src/plugins/intel_npu/tools/common/include/tools_helpers.hpp index e9743594ad8711..304f16c29d9a87 100644 --- a/src/plugins/intel_npu/tools/common/include/tools_helpers.hpp +++ b/src/plugins/intel_npu/tools/common/include/tools_helpers.hpp @@ -109,6 +109,7 @@ void setModelBatch(std::shared_ptr& model, uint32_t batch = 1) { if (batch == 1) { return; } + std::cout << "Configuring model batch: " << batch << std::endl; for (auto&& item : model->get_parameters()) { auto shape = item->get_partial_shape(); auto rank = shape.rank(); @@ -175,7 +176,20 @@ void reshape(ov::OutputVector inputsInfo, InputsInfo& infoMap, std::shared_ptrget_layout(); + std::cout << " " << param->get_friendly_name() << " : " << param->get_element_type() << " / " + << param->get_layout().to_string() << " / " << param->get_partial_shape().to_string() << std::endl; + } + std::cout << "Network outputs:" << std::endl; + for (auto&& result : network.get_results()) { + std::cout << " " << result->get_friendly_name() << " : " << result->get_element_type() << " / " + << result->get_layout().to_string() << " / " << result->get_output_partial_shape(0).to_string() + << std::endl; } } diff --git a/src/plugins/intel_npu/tools/common/src/tensor_utils.cpp b/src/plugins/intel_npu/tools/common/src/tensor_utils.cpp index 32616b86135243..c2b4902497777e 100644 --- a/src/plugins/intel_npu/tools/common/src/tensor_utils.cpp +++ b/src/plugins/intel_npu/tools/common/src/tensor_utils.cpp @@ -492,5 +492,29 @@ ov::Tensor joinTensors(const std::list& tensors, const ov::Layout& l } return out; } + +std::list splitBatchedTensor(const ov::Tensor &tensor, const ov::Layout& layout, size_t parts) { + if (!parts) { + OPENVINO_THROW("Cannot split tensor on parts: ", parts); + } + auto pivotShape = tensor.get_shape(); + if (!ov::layout::has_batch(layout)) { + OPENVINO_THROW("Cannot split tensor: has no batch_idx in layout", layout.to_string()); + } + auto pivotPrecision = tensor.get_element_type(); + 
if (pivotShape[ov::layout::batch_idx(layout)] % parts != 0) { + OPENVINO_THROW("Cannot split tensor with batch size: ", pivotShape[ov::layout::batch_idx(layout)], " on: ", parts ," equal tensors"); + } + pivotShape[ov::layout::batch_idx(layout)] /= parts; + std::list ret; + const auto *inputBuffer = tensor.data(); + for (size_t i = 0; i < parts; i ++) { + ov::Tensor out(pivotPrecision, pivotShape); + memcpy(out.data(), inputBuffer, out.get_byte_size()); + inputBuffer += out.get_byte_size(); + ret.push_back(std::move(out)); + } + return ret; +} } // namespace utils } // namespace npu diff --git a/src/plugins/intel_npu/tools/compile_tool/main.cpp b/src/plugins/intel_npu/tools/compile_tool/main.cpp index 7a088d1afc69e2..47178833508182 100644 --- a/src/plugins/intel_npu/tools/compile_tool/main.cpp +++ b/src/plugins/intel_npu/tools/compile_tool/main.cpp @@ -296,21 +296,6 @@ void configurePrePostProcessing(std::shared_ptr& model, const std::st model = preprocessor.build(); } -void printInputAndOutputsInfoShort(const ov::Model& network) { - std::cout << "Network inputs:" << std::endl; - for (auto&& param : network.get_parameters()) { - auto l = param->get_layout(); - std::cout << " " << param->get_friendly_name() << " : " << param->get_element_type() << " / " - << param->get_layout().to_string() << " / " << param->get_partial_shape().to_string() << std::endl; - } - std::cout << "Network outputs:" << std::endl; - for (auto&& result : network.get_results()) { - std::cout << " " << result->get_friendly_name() << " : " << result->get_element_type() << " / " - << result->get_layout().to_string() << " / " << result->get_output_partial_shape(0).to_string() - << std::endl; - } -} - inline std::string fileNameNoExt(const std::string& filepath) { auto pos = filepath.rfind('.'); if (pos == std::string::npos) @@ -456,6 +441,9 @@ int main(int argc, char* argv[]) { std::cout << "Configuring model pre & post processing" << std::endl; configurePrePostProcessing(model, FLAGS_ip, FLAGS_op, FLAGS_iop, FLAGS_il, FLAGS_ol, FLAGS_iol, FLAGS_iml, FLAGS_oml, FLAGS_ioml); + if (FLAGS_shape.empty()) { + setModelBatch(model, FLAGS_override_model_batch_size); + } std::cout << "Printing Input and Output Info from model" << std::endl; printInputAndOutputsInfoShort(*model); auto timeBeforeLoadNetwork = std::chrono::steady_clock::now(); diff --git a/src/plugins/intel_npu/tools/single-image-test/main.cpp b/src/plugins/intel_npu/tools/single-image-test/main.cpp index 5658c18650243b..2454e9afd40e0d 100644 --- a/src/plugins/intel_npu/tools/single-image-test/main.cpp +++ b/src/plugins/intel_npu/tools/single-image-test/main.cpp @@ -183,6 +183,21 @@ std::vector splitStringList(const std::string& str, char delim) { return out; } +std::string to_string(const std::vector& c) { + std::stringstream stream; + std::string ret; + if (!c.empty()) { + stream << "["; + for (const auto &elem : c) { + stream << elem << ","; + } + ret = stream.str(); + ret.pop_back(); + ret += "]"; + } + return ret; +} + std::map parseArgMap(std::string argMap) { argMap.erase(std::remove_if(argMap.begin(), argMap.end(), ::isspace), argMap.end()); @@ -386,7 +401,23 @@ void convertBufferType(OutT* destination, const InT* source, size_t numberOfElem }); } -void cvToOV(const cv::Mat& cvImg, const ov::Tensor& tensor, const ov::Shape& shape, const ov::Layout& layout, +struct BatchIndexer { + const size_t index = 0; + const size_t size = 1; + + BatchIndexer(size_t lineIndex = 0, size_t lineCount = 1) : index(lineIndex), size(lineCount) { + OPENVINO_ASSERT(index < size, 
"Inconsistent parameters used for " + "BatchIndexer construction, lineIndex: ", index, + " must be lesser than lineCount: ", size); + } + + std::string to_string() const { + std::stringstream sstream; + sstream << "[" << index << "/" << size << "]"; + return sstream.str(); + } +}; +void cvToOV(const cv::Mat& cvImg, const BatchIndexer &cvImgInBatch, const ov::Tensor& tensor, const ov::Shape& shape, const ov::Layout& layout, const std::string& colorFormat) { const ov::element::Type& precision = tensor.get_element_type(); @@ -449,8 +480,12 @@ void cvToOV(const cv::Mat& cvImg, const ov::Tensor& tensor, const ov::Shape& sha if (layout == ov::Layout("NHWC")) { const auto dataBuffer = reinterpret_cast(tensor.data()); - - cv::Mat out(static_cast(H), static_cast(W), cvType, dataBuffer); + cv::Mat auxOut(static_cast(H), static_cast(W), cvType); + cv::Mat tensorOut(static_cast(H), static_cast(W), cvType, dataBuffer); + // only a first image from an input image array fills an original input tensor up. + // Subsequent images (if exist) will fill batch slices of the input tensor + // by its number in the input array respectively + cv::Mat &out = (cvImgInBatch.index == 0 ? tensorOut : auxOut); if (precision == ov::element::Type_t::f16) { const auto inPtr = in.ptr(); @@ -466,13 +501,35 @@ void cvToOV(const cv::Mat& cvImg, const ov::Tensor& tensor, const ov::Shape& sha in.copyTo(out); } - for (size_t n = 1; n < N; ++n) { + // Being called sequentially with ascending `cvImgInBatch.index` value, + // it fills up rest of the batched tensor by + // a last requested image data until its ending from a batched slice position + // determined by parameter 'cvImgInBatch.index', so that filling N batched tensor + // by array of images size M, where M < N, will make up + // The final batched tensor will comprise + // [imgIdx_0, imgIdx_1,..., imgIdx_M, imgIdx_M,...,imgIdx_M] as its slices + if (cvImgInBatch.index == 0 && N != 1) { + std::cout << "Fill up all input batch slices up to " << N + << " with image data from the array: [" + << cvImgInBatch.to_string() << std::endl; + } + for (size_t n = std::max(1, cvImgInBatch.index); n < N; ++n) { + if (n == std::max(1, cvImgInBatch.index) && cvImgInBatch.index >= 1) { + std::cout << "Fill input batch slices starting from index " + << n << " up to " << N << " with image data from the array: " + << cvImgInBatch.to_string() << std::endl; + } cv::Mat batch(static_cast(H), static_cast(W), cvType, dataBuffer + n * (out.size().area() * out.elemSize())); out.copyTo(batch); } } else if (layout == ov::Layout("NCHW")) { - auto tensorPlanes = ovToCV(tensor, shape, layout, 0); + ov::Tensor auxTensor(precision, shape); + const ov::Tensor &outTensor = (cvImgInBatch.index == 0 ? tensor : auxTensor); + // only a first image from an input image array fills an original input tensor up. 
+ // Subsequent images (if exist) will fill batch slices of the input tensor + // by its number in the input array respectively + auto tensorPlanes = ovToCV(outTensor, shape, layout, 0); if (!(precision == ov::element::Type_t::f16 || precision == ov::element::Type_t::bf16)) { @@ -495,7 +552,24 @@ void cvToOV(const cv::Mat& cvImg, const ov::Tensor& tensor, const ov::Shape& sha } } - for (size_t n = 1; n < N; ++n) { + // Being called sequentially with ascending `cvImgInBatch.index` value, + // it fills up rest of the batched tensor by + // a last requested image data until its ending from a batched slice position + // determined by parameter 'cvImgInBatch.index', so that filling N batched tensor + // by array of images size M, where M < N, will make up + // The final batched tensor will comprise + // [imgIdx_0, imgIdx_1,..., imgIdx_M, imgIdx_M,...,imgIdx_M] as its slices + if (cvImgInBatch.index == 0 && N != 1) { + std::cout << "Fill up all input batch slices planes up to " << N + << " with image data from the array: " + << cvImgInBatch.to_string() << std::endl; + } + for (size_t n = std::max(1, cvImgInBatch.index); n < N; ++n) { + if (n == std::max(1, cvImgInBatch.index) && cvImgInBatch.index >= 1) { + std::cout << "Fill input batch slices planes starting from index " + << n << " up to " << N << " with image data from the array: " + << cvImgInBatch.to_string() << std::endl; + } const auto batchPlanes = ovToCV(tensor, shape, layout, n); OPENVINO_ASSERT(batchPlanes.size() == tensorPlanes.size()); @@ -654,27 +728,28 @@ std::string cleanName(std::string&& name) { return std::move(name); } -ov::Tensor loadImage(const ov::element::Type& precision, const ov::Shape& shape, const ov::Layout& layout, - const std::string& filePath, const std::string& colorFormat) { - const auto frame = cv::imread(filePath, cv::IMREAD_COLOR); - OPENVINO_ASSERT(!frame.empty(), "Failed to open input image file ", filePath); - +ov::Tensor loadImages(const ov::element::Type& precision, const ov::Shape& shape, const ov::Layout& layout, + const std::vector& filePaths, const std::string& colorFormat) { const ov::Tensor tensor(precision, shape); + for (size_t fileIndex = 0; fileIndex != filePaths.size(); fileIndex++) { + const auto &filePath = filePaths[fileIndex]; + const auto frame = cv::imread(filePath, cv::IMREAD_COLOR); + OPENVINO_ASSERT(!frame.empty(), "Failed to open input image file ", filePath); - cvToOV(frame, tensor, shape, layout, colorFormat); - + cvToOV(frame, BatchIndexer{fileIndex, filePaths.size()}, tensor, shape, layout, colorFormat); + } return tensor; } -ov::Tensor loadBinary(const ov::element::Type& modelPrecision, const ov::Shape& shape, const ov::Layout& layout, - const std::string& filePath, const ov::element::Type& dataPrecision) { +void loadBinary(const std::string& filePath, const BatchIndexer &fileSourceInBatch, ov::Tensor &requestedTensor, + const ov::element::Type& modelPrecision, const ov::Shape& shape, + const ov::Layout& layout, const ov::element::Type& dataPrecision) { std::ifstream binaryFile(filePath, std::ios_base::binary | std::ios_base::ate); OPENVINO_ASSERT(binaryFile, "Failed to open input binary file: ", filePath); const auto fileSize = binaryFile.tellg(); binaryFile.seekg(0, std::ios_base::beg); OPENVINO_ASSERT(binaryFile.good(), "While reading a file an error is encountered"); const size_t fileBytes = static_cast(fileSize); - ov::Tensor requestedTensor(modelPrecision, shape); const size_t reqTensorBytes = static_cast(requestedTensor.get_byte_size()); if (dataPrecision != 
modelPrecision && dataPrecision != ov::element::Type_t::undefined) { @@ -688,7 +763,7 @@ ov::Tensor loadBinary(const ov::element::Type& modelPrecision, const ov::Shape& std::cout << "File contains " << fileBytes << " bytes, but it expected to be: " << inputTensor.get_byte_size() << " while converting precision from " << dataPrecision << " to " << modelPrecision - << ". Check whether it is possible to batch loading " << std::endl; + << ". Check whether it is possible to fit it into batch loading " << std::endl; OPENVINO_ASSERT(ov::layout::has_batch(layout), "Input layout has no batch dimenstion: ", layout.to_string()); size_t N = shape[ov::layout::batch_idx(layout)]; @@ -703,12 +778,20 @@ ov::Tensor loadBinary(const ov::element::Type& modelPrecision, const ov::Shape& const ov::Tensor convertedPrecisionTensor(modelPrecision, debatchedInputTensorShape); npu::utils::convertTensorPrecision(inputDebatchedTensor, convertedPrecisionTensor); std::list tensorsToJoin; - std::generate_n(std::back_inserter(tensorsToJoin), N, [&convertedPrecisionTensor]() { - return convertedPrecisionTensor; - }); + std::list tensorsFromSplit = npu::utils::splitBatchedTensor(requestedTensor, layout, N); + // Constitute a new bathed tensor of size N from parts of it + // enumerated by indices from the interval [0...fileSourceInBatch.index], + // where fileSourceInBatch.index < N + // The rest parts of the new tensor [fileSourceInBatch.index+1...N] + // will be filled up by same content of an image of `fileSourceInBatch.index` + std::copy_n(tensorsFromSplit.begin(), std::min(fileSourceInBatch.index, N), std::back_inserter(tensorsToJoin)); + if (fileSourceInBatch.index < N) { + std::generate_n(std::back_inserter(tensorsToJoin), N - fileSourceInBatch.index, [&convertedPrecisionTensor]() { + return convertedPrecisionTensor; + }); + } requestedTensor = npu::utils::joinTensors(tensorsToJoin, layout); } - } else { if (fileBytes == reqTensorBytes) { binaryFile.read(reinterpret_cast(requestedTensor.data()), @@ -716,22 +799,40 @@ ov::Tensor loadBinary(const ov::element::Type& modelPrecision, const ov::Shape& } else { std::cout << "File contains " << fileBytes << " bytes, but it expected to be: " << reqTensorBytes << " when datatypes match. " - << ". 
Check whether it is possible to batch loading " << std::endl; + << "Check whether it is possible to fit it into batch loading " << std::endl; OPENVINO_ASSERT(ov::layout::has_batch(layout), "Input layout has no batch dimenstion: ", layout.to_string()); size_t N = shape[ov::layout::batch_idx(layout)]; OPENVINO_ASSERT(fileBytes * N == reqTensorBytes, "File contains ", fileBytes, " bytes, but ", reqTensorBytes, " in batch size ", N, " expected"); - // duplicate a binary into tensor memory if the tensor batched - for (size_t n = 0; n < N; ++n) { + if (fileSourceInBatch.index == 0 && N != 1) { + std::cout << "Fill up all input batch slices up to " << N + << " with binary data from the array: " + << fileSourceInBatch.to_string() << std::endl; + } + for (size_t n = std::max(0, fileSourceInBatch.index); n < N; ++n) { + if (n == std::max(1, fileSourceInBatch.index) && fileSourceInBatch.index >= 1) { + std::cout << "Fill input batch slices starting from index " + << n << " up to " << N + << " with binary data from the data sources array: " + << fileSourceInBatch.to_string() << std::endl; + } binaryFile.seekg(0, std::ios_base::beg); binaryFile.read(reinterpret_cast(requestedTensor.data()) + fileBytes * n, static_cast(fileBytes)); } } } +} +ov::Tensor loadBinaries(const ov::element::Type& modelPrecision, const ov::Shape& shape, const ov::Layout& layout, + const std::vector& filePaths, const ov::element::Type& dataPrecision) { + ov::Tensor requestedTensor(modelPrecision, shape); + for (size_t fileIndex = 0; fileIndex != filePaths.size(); fileIndex++) { + const auto &filePath = filePaths[fileIndex]; + loadBinary(filePath, BatchIndexer{fileIndex, filePaths.size()}, requestedTensor, modelPrecision, shape, layout, dataPrecision); + } return requestedTensor; } @@ -752,12 +853,12 @@ ov::Tensor loadBinary(const ov::element::Type& modelPrecision, const ov::Shape& * @return The tensor containing the loaded data. 
*/ ov::Tensor loadInput(const ov::element::Type& modelPrecision, const ov::Shape& shape, const ov::Layout& layout, - const std::string& filePath, const std::string& colorFormat, + const std::vector& filePaths, const std::string& colorFormat, const ov::element::Type& dataPrecision = ov::element::Type_t::undefined) { if (isImage(shape, layout) && !FLAGS_img_as_bin) { - return loadImage(modelPrecision, shape, layout, filePath, colorFormat); + return loadImages(modelPrecision, shape, layout, filePaths, colorFormat); } else { - return loadBinary(modelPrecision, shape, layout, filePath, dataPrecision); + return loadBinaries(modelPrecision, shape, layout, filePaths, dataPrecision); } } @@ -1719,11 +1820,20 @@ static int runSingleImageTest() { std::map outModelLayouts = parseLayoutRegex(FLAGS_oml); std::vector inputFilesPerCase; - std::vector> inputFilesForOneInfer; + using FilesPerInput = std::vector; + using FilesForModelInputs = std::vector; + std::vector inputFilesForOneInfer; inputFilesPerCase = splitStringList(FLAGS_input, ';'); for (const auto& images : inputFilesPerCase) { - inputFilesForOneInfer.push_back(splitStringList(images, ',')); + std::vector filesPerModel = splitStringList(images, ','); + FilesForModelInputs entireModelFiles; + entireModelFiles.reserve(filesPerModel.size()); + for (auto &&filesPerInput : filesPerModel) { + // from now on each input of a model support multiple image files as content of a batched input + entireModelFiles.push_back(splitStringList(filesPerInput, '|')); + } + inputFilesForOneInfer.push_back(std::move(entireModelFiles)); } std::vector inputBinPrecisionStrPerCase; @@ -1896,8 +2006,13 @@ static int runSingleImageTest() { } } + if (FLAGS_shape.empty()) { + setModelBatch(model, FLAGS_override_model_batch_size); + } std::cout << "Compile model" << std::endl; - compiledModel = core.compile_model(ppp.build(), FLAGS_device); + model = ppp.build(); + printInputAndOutputsInfoShort(*model); + compiledModel = core.compile_model(model, FLAGS_device); } else { std::cout << "Import network " << FLAGS_network << std::endl; @@ -1947,7 +2062,7 @@ static int runSingleImageTest() { for (size_t numberOfTestCase = 0; numberOfTestCase < inputFilesPerCase.size(); ++numberOfTestCase) { const auto inputsInfo = compiledModel.inputs(); const auto outputsInfo = compiledModel.outputs(); - std::vector inputFiles = inputFilesForOneInfer[numberOfTestCase]; + const FilesForModelInputs &inputFiles = inputFilesForOneInfer[numberOfTestCase]; OPENVINO_ASSERT(inputFiles.size() == inputsInfo.size(), "Number of input files ", inputFiles.size(), " doesn't match network configuration ", inputsInfo.size()); @@ -1984,7 +2099,7 @@ static int runSingleImageTest() { inputDescriptors.emplace(inputInfo.get_any_name(), TensorDescriptor{precision, shape, dataShape, inputLayout}); - std::cout << "Load input #" << inputInd << " from " << inputFiles[inputInd] << " as " << precision + std::cout << "Load input #" << inputInd << " from " << to_string(inputFiles[inputInd]) << " as " << precision << " " << inputLayout.to_string() << " " << shape << std::endl; const ov::Tensor tensor = From b416fb02d97896f8198287c8a976cc8e1978ff62 Mon Sep 17 00:00:00 2001 From: Alina Kladieva Date: Thu, 7 Nov 2024 17:48:10 +0100 Subject: [PATCH 022/182] [GHA] Enable Fedora trigger in post-commit (#27463) We need that to have build artifacts stored from pushes Signed-off-by: Alina Kladieva --- .github/workflows/fedora_29.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/fedora_29.yml 
b/.github/workflows/fedora_29.yml index 3f685502747a19..f3b101327f76dc 100644 --- a/.github/workflows/fedora_29.yml +++ b/.github/workflows/fedora_29.yml @@ -5,7 +5,7 @@ on: merge_group: push: branches: - # - master + - master - 'releases/**' concurrency: From fb5b5ed0036c1bff4753c70364541600889841db Mon Sep 17 00:00:00 2001 From: Chon Ming Lee Date: Fri, 8 Nov 2024 14:34:54 +0800 Subject: [PATCH 023/182] [GPU] Add check to fallback to permute_ref if different format for oneDNN (#27402) ### Details: If the input and output format are not the same in permute before oneDNN convolution, permute_kernel_f_y_axes doesn't support it. Need to fallback to permute_ref ### Tickets: - *CVS-155933* --- .../permute/permute_kernel_f_y_axes.cpp | 6 ++ .../unit/test_cases/permute_gpu_test.cpp | 70 +++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_f_y_axes.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_f_y_axes.cpp index 69756b02e2d9ff..deb25d17618347 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_f_y_axes.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_f_y_axes.cpp @@ -214,6 +214,8 @@ bool PermuteKernel_f_y_axes::Validate(const Params& p) const { const auto& params = dynamic_cast(p); const auto& in = params.inputs[0]; const auto in_layout = in.GetLayout(); + const auto& out = params.outputs[0]; + const auto& out_layout = out.GetLayout(); const auto feature_div = GetDivisor(in.Feature().v); const auto y_div = GetDivisor(in.Y().v); @@ -227,6 +229,10 @@ bool PermuteKernel_f_y_axes::Validate(const Params& p) const { return false; } + if (in_layout != out_layout) { + return false; + } + // Accept only supported blocked layouts and SIMD sizes. 
if (!SimpleLayout(in_layout)) { const auto feature_block_size = GetFeatureBlockSize(params); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp index 9688ebe227071f..db9777a3a613d9 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/permute_gpu_test.cpp @@ -2175,6 +2175,76 @@ TEST(permute_gpu_f32_dynamic, bfyx_0_2_3_1) { } } +TEST(permute_f_y_axes_fallback, b_fs_yx_fsv16) { + constexpr size_t array_size = 128; + + auto& engine = get_test_engine(); + if (!engine.get_device_info().supports_immad) + return; + + auto input_layout_static = layout{ov::PartialShape{1, 8, 16, 1}, data_types::f32, format::bfyx}; + auto input = engine.allocate_memory(input_layout_static); + + std::vector input_data; + input_data.reserve(array_size); + for (size_t i = 0; i < array_size; ++i) + input_data.push_back(static_cast(i)); + + auto weights = engine.allocate_memory({ data_types::f32, format::bfyx, { 8, 16, 1, 1 } }); + + std::vector weights_data; + weights_data.reserve(array_size); + for (size_t i = 0; i < array_size; ++i) + weights_data.push_back(static_cast(1.0)); + + set_values(weights, weights_data); + set_values(input, input_data); + + auto impl_desc_onednn = ov::intel_gpu::ImplementationDesc{format::b_fs_yx_fsv16, "", impl_types::onednn}; + auto impl_forcing_map = ov::intel_gpu::ImplForcingMap{{"conv", impl_desc_onednn}}; + + topology topology; + topology.add(input_layout("input", input_layout_static)); + topology.add(permute("permute", input_info("input"), { 0, 2, 1, 3 })); + topology.add(data("weights", weights)); + topology.add(convolution("conv", input_info("permute"), "weights", "", 1, {1,1}, {1,1}, {0,0}, {0,0}, false)); + + ExecutionConfig config = get_test_default_config(engine); + config.set_property(ov::intel_gpu::force_implementations(impl_forcing_map)); + + network network(engine, topology, config); + network.set_input_data("input", input); + auto outputs = network.execute(); + ASSERT_EQ(outputs.size(), size_t(1)); + ASSERT_EQ(outputs.begin()->first, "conv"); + + auto output = outputs.begin()->second.get_memory(); + + float answers[] = { + 120.f, 120.f, 120.f, 120.f, 120.f, 120.f, 120.f, 120.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 376.f, 376.f, 376.f, 376.f, 376.f, 376.f, 376.f, 376.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 632.f, 632.f, 632.f, 632.f, 632.f, 632.f, 632.f, 632.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 888.f, 888.f, 888.f, 888.f, 888.f, 888.f, 888.f, 888.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 1144.f, 1144.f, 1144.f, 1144.f, 1144.f, 1144.f, 1144.f, 1144.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 1400.f, 1400.f, 1400.f, 1400.f, 1400.f, 1400.f, 1400.f, 1400.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 1656.f, 1656.f, 1656.f, 1656.f, 1656.f, 1656.f, 1656.f, 1656.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + 1912.f, 1912.f, 1912.f, 1912.f, 1912.f, 1912.f, 1912.f, 1912.f, + 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, + }; + + cldnn::mem_lock output_ptr(output, get_test_stream()); + for (size_t i = 0; i < array_size; i++) { + ASSERT_FLOAT_EQ(answers[i], output_ptr[i]); + } +} + class permute_bfzyx_to_bfyxz: public TiledPermuteTest {}; INSTANTIATE_TEST_SUITE_P(, permute_bfzyx_to_bfyxz, From 5e6fb3c157cc0231f7ba6aae4d546da93beaaba2 Mon Sep 17 00:00:00 2001 From: yuanxion <96522341+yuanxion@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:47:57 +0800 Subject: [PATCH 024/182] [GPU] Fix one 
hot seg fault when the second input node is not a Constant node (#27442) ### Details: - *Get the 'depth' value from 'Select' node instead of a Constant node* This happens when the second input of **OneHot** node is not a **Constant** node (**Select** node in this PR). ![image](https://github.com/user-attachments/assets/1bef5fdd-ae42-4916-9ba1-46b8d397e34b) ### Tickets: - *[CVS-155564](https://jira.devtools.intel.com/browse/CVS-155564)* --------- Signed-off-by: yuan.xiong --- .../include/intel_gpu/primitives/one_hot.hpp | 15 ++++++++ .../src/graph/include/one_hot_inst.h | 2 +- src/plugins/intel_gpu/src/graph/one_hot.cpp | 20 ++++++++-- src/plugins/intel_gpu/src/graph/program.cpp | 4 +- .../intel_gpu/src/plugin/ops/one_hot.cpp | 37 ++++++++++++------- 5 files changed, 58 insertions(+), 20 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp index c66bbedc80ee0c..d47a128c62c434 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp @@ -54,6 +54,21 @@ struct one_hot : public primitive_base { , on_value(on_value) , off_value(off_value) {} + /// @brief onehot with depth from Select node + one_hot(const primitive_id& id, + const input_info& input, + const input_info& input_depth, + const tensor& shape, + const data_types output_dt, + const int64_t& one_hot_axis, + const float& on_value = 1.0f, + const float& off_value = 0.0f) + : primitive_base(id, {input, input_depth}, 1, {optional_data_type{output_dt}}) + , shape(shape) + , one_hot_axis(one_hot_axis) + , on_value(on_value) + , off_value(off_value) {} + /// @brief Constructs one-hot primitive layer. /// @param id An identifier of new primitive. /// @param input An identifier of primitive which is an input for newly created one-hot primitive. 
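For reference, the situation described in the Details above, a OneHot whose `depth` input is produced by a Select rather than a Constant, can be reproduced with a model built along these lines. This is a minimal illustrative sketch using the public OpenVINO C++ API, not code taken from this PR; the parameter names, shapes, and depth values are placeholders.

```cpp
#include "openvino/openvino.hpp"
#include "openvino/op/ops.hpp"

// Builds a tiny model in which OneHot's `depth` comes from a Select,
// i.e. it is not a Constant when the GPU plugin translates the graph.
std::shared_ptr<ov::Model> make_one_hot_with_select_depth() {
    auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{4});
    auto cond    = std::make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::Shape{});
    auto depth_a = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {8});
    auto depth_b = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {16});
    // The effective depth is only known at runtime.
    auto depth   = std::make_shared<ov::op::v1::Select>(cond, depth_a, depth_b);
    auto on_val  = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {1.0f});
    auto off_val = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.0f});
    auto one_hot = std::make_shared<ov::op::v1::OneHot>(indices, depth, on_val, off_val, /*axis=*/-1);
    return std::make_shared<ov::Model>(ov::OutputVector{one_hot}, ov::ParameterVector{indices, cond});
}
```

With such a graph the plugin cannot read `depth` from a folded Constant, which is why the changes below register input 1 as a shape-infer dependency and let `calc_output_layouts()` pick the value up from `memory_deps` at runtime.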
diff --git a/src/plugins/intel_gpu/src/graph/include/one_hot_inst.h b/src/plugins/intel_gpu/src/graph/include/one_hot_inst.h index 782751e261e51a..2e5dc81a3d4049 100644 --- a/src/plugins/intel_gpu/src/graph/include/one_hot_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/one_hot_inst.h @@ -22,7 +22,7 @@ struct typed_program_node : typed_program_node_base { support_padding_all(true); } program_node& input() const { return get_dependency(0); } - std::vector get_shape_infer_dependencies() const override { return {}; } + std::vector get_shape_infer_dependencies() const override { return {1}; } }; using one_hot_node = typed_program_node; diff --git a/src/plugins/intel_gpu/src/graph/one_hot.cpp b/src/plugins/intel_gpu/src/graph/one_hot.cpp index ede34e8e0935b0..bb90e3bf1276b5 100644 --- a/src/plugins/intel_gpu/src/graph/one_hot.cpp +++ b/src/plugins/intel_gpu/src/graph/one_hot.cpp @@ -66,11 +66,23 @@ std::vector one_hot_inst::calc_output_layouts(const one_hot_node& /*node }; int64_t depth = desc->depth; + auto& memory_deps = impl_param.memory_deps; + + std::unordered_map const_data = {}; + if (depth != 0) { + auto depth_tensor = ov::Tensor(ov::element::i64, ov::Shape{1}, static_cast(&depth)); + const_data[1] = depth_tensor; + } else if (memory_deps.count(1) > 0) { + auto depth_mem = memory_deps.at(1); + + cldnn::mem_lock depth_lock(depth_mem, impl_param.get_stream()); + auto depth_ptr = depth_lock.data(); + + // update depth_tensor if depth value comes from memory_deps instead of Constant node + auto depth_tensor = make_tensor(depth_mem->get_layout(), depth_ptr); + const_data[1] = depth_tensor; + } - auto depth_tensor = ov::Tensor(ov::element::i64, ov::Shape{1}, static_cast(&depth)); - std::unordered_map const_data = { - {1, depth_tensor} - }; std::vector output_shapes = ov::op::v1::shape_infer(&op, input_shapes, ov::make_tensor_accessor(const_data)); return {{output_shapes[0], dt, format::get_default_format(output_shapes[0].size())}}; diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index 7a66d32795c17c..2bfaac84134387 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -436,7 +436,7 @@ void program::prepare_nodes(topology const& topology) { } } -// add node's dependecies from its primitive dependencies +// add node's dependencies from its primitive dependencies void program::add_node_dependencies(program_node* node) { auto deps = node->get_primitive()->dependencies(); // add pointers to node's dependencies @@ -453,7 +453,7 @@ void program::add_node_dependencies(program_node* node) { } /* helper method for program constructor from list of nodes which - copies src_node dependecies to the destination node dest_node dependencies. + copies src_node dependencies to the destination node dest_node dependencies. 
But only to those which appaer in this program implementation nodes_map */ void program::copy_node_dependencies(program_node* dest_node, program_node* src_node) { if (dest_node->get_primitive()->id != src_node->get_primitive()->id) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp index 9a7f8697d0b736..09e9d81db088df 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp @@ -7,7 +7,6 @@ #include "transformations/utils/utils.hpp" #include "openvino/op/one_hot.hpp" - #include "intel_gpu/primitives/one_hot.hpp" namespace ov { @@ -49,21 +48,33 @@ static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptrcast_vector()[0]; - auto out_pshape = op->get_output_partial_shape(0); cldnn::tensor out_tensor = out_pshape.is_static() ? tensor_from_dims(out_pshape.to_shape()) : cldnn::tensor{}; - auto oneHotPrim = cldnn::one_hot(layerName, - inputs[0], - out_tensor, - cldnn::element_type_to_data_type(op->get_output_element_type(0)), - axis, - depth, - on_value, - off_value); - - p.add_primitive(*op, oneHotPrim); + if (depth_value_node) { + int64_t depth = depth_value_node->cast_vector()[0]; + auto oneHotPrim = cldnn::one_hot(layerName, + inputs[0], + out_tensor, + cldnn::element_type_to_data_type(op->get_output_element_type(0)), + axis, + depth, + on_value, + off_value); + + p.add_primitive(*op, oneHotPrim); + } else { + auto oneHotPrim = cldnn::one_hot(layerName, + inputs[0], + inputs[1], + out_tensor, + cldnn::element_type_to_data_type(op->get_output_element_type(0)), + axis, + on_value, + off_value); + + p.add_primitive(*op, oneHotPrim); + } } REGISTER_FACTORY_IMPL(v1, OneHot); From 9ff42cde8951ff3e70d6f512c032e1ce21bf800b Mon Sep 17 00:00:00 2001 From: Valentin Rusu Date: Fri, 8 Nov 2024 11:12:38 +0200 Subject: [PATCH 025/182] [Benchmark App] Reject -nireq > -niter when using sync API (#27460) ### Details: While investigating the problem raised by the E-117106 ticket, turned out that, in fact, a problem lies within the Benchmark App. When choosing the sync API, it is not possible to run more inference requests than specified iterations. Later on, if `-pc` flag was given, the app still tries to display the profiling data for the never launched inferences. This PR detects this situation and rejects the application execution after writing an explanatory message to the console output. Changes are applied for both C++ and Python versions of the benchmark app. ### Tickets: - CVS-156823 Co-authored-by: Vladimir Zlobin --- samples/cpp/benchmark_app/main.cpp | 4 ++++ tools/benchmark_tool/openvino/tools/benchmark/main.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index 2cfd15b77afb6e..4dcc1e82924efd 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -60,6 +60,10 @@ bool parse_and_check_command_line(int argc, char* argv[]) { if (FLAGS_api != "async" && FLAGS_api != "sync") { throw std::logic_error("Incorrect API. 
Please set -api option to `sync` or `async` value."); } + if (FLAGS_api == "sync" && FLAGS_nireq > FLAGS_niter) { + throw std::logic_error( + "Number of iterations should be greater than number of infer requests when using sync API."); + } if (!FLAGS_hint.empty() && FLAGS_hint != "throughput" && FLAGS_hint != "tput" && FLAGS_hint != "latency" && FLAGS_hint != "cumulative_throughput" && FLAGS_hint != "ctput" && FLAGS_hint != "none") { throw std::logic_error("Incorrect performance hint. Please set -hint option to" diff --git a/tools/benchmark_tool/openvino/tools/benchmark/main.py b/tools/benchmark_tool/openvino/tools/benchmark/main.py index ad2839d217fcad..c77b50a7fd4721 100644 --- a/tools/benchmark_tool/openvino/tools/benchmark/main.py +++ b/tools/benchmark_tool/openvino/tools/benchmark/main.py @@ -49,6 +49,9 @@ def arg_not_empty(arg_value,empty_value): raise Exception("Cannot set precision for a compiled model. " \ "Please re-compile your model with required precision.") + if args.api_type == "sync" and args.number_infer_requests > args.number_iterations: + raise Exception("Number of infer requests should be less than or equal to number of iterations in sync mode.") + return args, is_network_compiled def main(): From bb99d6723ba3e875158ddec63cf4df26affe14ed Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Fri, 8 Nov 2024 02:43:02 -0800 Subject: [PATCH 026/182] [GPU] Don't select add_fusing_type::sum when dependency is input_layout in loop body network (#27443) ### Details: - Don't select add_fusing_type::sum when dependency is input_layout in loop body network. sum op could contaminate memory buffer. ### Tickets: - 154020 --- src/plugins/intel_gpu/src/graph/program_helpers.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/graph/program_helpers.cpp b/src/plugins/intel_gpu/src/graph/program_helpers.cpp index 256ba493311b7c..51e5d8510739e2 100644 --- a/src/plugins/intel_gpu/src/graph/program_helpers.cpp +++ b/src/plugins/intel_gpu/src/graph/program_helpers.cpp @@ -117,7 +117,8 @@ add_fusing_type onednn_add_fusing_helpers::get_add_fusing_type( && p_layout.data_padding == d_layout.data_padding && dep_node.get_users().size() == 1 && !dep_node.is_constant() - && !p_node.is_type()) { + && !p_node.is_type() + && !(dep_node.get_program().is_body_program() && dep_node.is_type())) { return add_fusing_type::sum; } else if (p_layout.get_tensor() == d_layout.get_tensor()) { return add_fusing_type::binary_per_tensor; From 406034de7d38f0d076cb88618499b6bb860f617d Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 8 Nov 2024 12:03:16 +0100 Subject: [PATCH 027/182] [DOCS] benchmark data tweaks (#27424) Co-authored-by: Andrzej Kopytko --- .../benchmarks_files/data/graph-data-ov.json | 1462 ++++++++--------- .../benchmarks_files/graph-config.json | 10 +- .../_static/download/supported_models.csv | 3 +- docs/sphinx_setup/index.rst | 2 +- 4 files changed, 741 insertions(+), 736 deletions(-) diff --git a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json index 0bb38199b997ca..7576fd96148554 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json +++ b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ov.json @@ -3,7 +3,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { 
"Precisions": [ @@ -37,7 +37,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -71,7 +71,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -105,7 +105,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -139,7 +139,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -173,7 +173,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -207,7 +207,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -241,7 +241,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -275,7 +275,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -309,7 +309,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -343,7 +343,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -377,7 +377,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -411,7 +411,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -445,7 +445,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -479,7 +479,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms 
(Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -513,7 +513,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -547,7 +547,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -581,7 +581,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -615,7 +615,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -649,7 +649,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -683,7 +683,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -717,7 +717,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -751,7 +751,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -785,7 +785,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -819,7 +819,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -853,7 +853,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -887,7 +887,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1023,7 +1023,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ 
-1057,7 +1057,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1091,7 +1091,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1125,7 +1125,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1159,7 +1159,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1193,7 +1193,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1227,7 +1227,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1261,7 +1261,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1295,7 +1295,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1329,7 +1329,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1363,7 +1363,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1397,7 +1397,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1431,7 +1431,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1465,7 +1465,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1499,7 +1499,7 @@ "Platform": "Intel® Celeron® 
6305E CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -1533,7 +1533,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1567,7 +1567,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1601,7 +1601,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1635,7 +1635,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1669,7 +1669,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1703,7 +1703,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1737,7 +1737,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1771,7 +1771,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1805,7 +1805,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1839,7 +1839,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1873,7 +1873,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1907,7 +1907,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1941,7 +1941,7 @@ "Platform": "Intel® Core™ 
i5-1235U Processor CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -1975,7 +1975,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2009,7 +2009,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2043,7 +2043,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2077,7 +2077,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2111,7 +2111,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2145,7 +2145,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2179,7 +2179,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2213,7 +2213,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2247,7 +2247,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2281,7 +2281,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2315,7 +2315,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2349,7 +2349,7 @@ "Platform": "Intel® Xeon® 
W1290P CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2383,7 +2383,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2417,7 +2417,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2451,7 +2451,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2485,7 +2485,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2519,7 +2519,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2553,7 +2553,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2587,7 +2587,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2621,7 +2621,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2655,7 +2655,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2689,7 +2689,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2723,7 +2723,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2859,7 +2859,7 
@@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2893,7 +2893,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2927,7 +2927,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2961,7 +2961,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -2995,7 +2995,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3029,7 +3029,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3063,7 +3063,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3097,7 +3097,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3131,7 +3131,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3165,7 +3165,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3199,7 +3199,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3233,7 +3233,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { 
"throughput": { "Precisions": [ @@ -3267,7 +3267,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3301,7 +3301,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3335,7 +3335,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3369,7 +3369,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3403,7 +3403,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3437,7 +3437,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -3471,7 +3471,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3505,7 +3505,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3539,7 +3539,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3573,7 +3573,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3607,7 +3607,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3641,7 +3641,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + 
"PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3675,7 +3675,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3709,7 +3709,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3743,7 +3743,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3777,7 +3777,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3811,7 +3811,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3845,7 +3845,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3879,7 +3879,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3913,7 +3913,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3947,7 +3947,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -3981,7 +3981,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4015,7 +4015,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4049,7 +4049,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4083,7 +4083,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® 
Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4117,7 +4117,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4151,7 +4151,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4185,7 +4185,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4219,7 +4219,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4253,7 +4253,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4287,7 +4287,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4321,7 +4321,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4355,7 +4355,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4389,7 +4389,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4423,7 +4423,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4457,7 +4457,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4491,7 +4491,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4525,7 +4525,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4559,7 +4559,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": 
"efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4593,7 +4593,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4627,7 +4627,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4661,7 +4661,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4695,7 +4695,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4729,7 +4729,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4763,7 +4763,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4899,7 +4899,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4933,7 +4933,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -4967,7 +4967,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5001,7 +5001,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5035,7 +5035,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5069,7 +5069,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5103,7 +5103,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client 
Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5137,7 +5137,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5171,7 +5171,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5205,7 +5205,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5239,7 +5239,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5273,7 +5273,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5307,7 +5307,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5341,7 +5341,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5375,7 +5375,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5409,7 +5409,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5443,7 +5443,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5477,7 +5477,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -5511,7 +5511,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -5545,7 +5545,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { 
"throughput": { "Precisions": [ @@ -5579,7 +5579,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5613,7 +5613,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5647,7 +5647,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5681,7 +5681,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5715,7 +5715,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5749,7 +5749,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5783,7 +5783,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5817,7 +5817,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -5851,7 +5851,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5885,7 +5885,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5919,7 +5919,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5953,7 +5953,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -5987,7 +5987,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6021,7 +6021,7 @@ 
"Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6055,7 +6055,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6089,7 +6089,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6123,7 +6123,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6157,7 +6157,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6191,7 +6191,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6225,7 +6225,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6259,7 +6259,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6293,7 +6293,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6327,7 +6327,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6361,7 +6361,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6395,7 +6395,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6429,7 +6429,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": 
{ "throughput": { "Precisions": [ @@ -6463,7 +6463,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6497,7 +6497,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6531,7 +6531,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6565,7 +6565,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6599,7 +6599,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6633,7 +6633,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6667,7 +6667,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6701,7 +6701,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6735,7 +6735,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6769,7 +6769,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6803,7 +6803,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6939,7 +6939,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -6973,7 +6973,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - 
"PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7007,7 +7007,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7041,7 +7041,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7075,7 +7075,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7109,7 +7109,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7143,7 +7143,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7177,7 +7177,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7211,7 +7211,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7245,7 +7245,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7279,7 +7279,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7313,7 +7313,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -7347,7 +7347,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7381,7 +7381,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ 
-7415,7 +7415,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7449,7 +7449,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7483,7 +7483,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7517,7 +7517,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7551,7 +7551,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7585,7 +7585,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7619,7 +7619,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7653,7 +7653,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7687,7 +7687,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7721,7 +7721,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7755,7 +7755,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7789,7 +7789,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7823,7 +7823,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7857,7 +7857,7 @@ "Platform": 
"Intel® Core™ i5-1335U Processor CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7891,7 +7891,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7925,7 +7925,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7959,7 +7959,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -7993,7 +7993,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8027,7 +8027,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8061,7 +8061,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8095,7 +8095,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8129,7 +8129,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8163,7 +8163,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8197,7 +8197,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8231,7 +8231,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8265,7 +8265,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8299,7 +8299,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": 
{ "throughput": { "Precisions": [ @@ -8333,7 +8333,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8367,7 +8367,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8401,7 +8401,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8435,7 +8435,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8469,7 +8469,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8503,7 +8503,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8537,7 +8537,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8571,7 +8571,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8605,7 +8605,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8639,7 +8639,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8775,7 +8775,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8809,7 +8809,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8843,7 +8843,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8877,7 +8877,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", 
"Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8911,7 +8911,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8945,7 +8945,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -8979,7 +8979,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9013,7 +9013,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9047,7 +9047,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9081,7 +9081,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9115,7 +9115,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9149,7 +9149,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9183,7 +9183,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9217,7 +9217,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9251,7 +9251,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9285,7 +9285,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9319,7 +9319,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + 
"PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9353,7 +9353,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -9387,7 +9387,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -9421,7 +9421,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -9455,7 +9455,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9489,7 +9489,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9523,7 +9523,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9557,7 +9557,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9591,7 +9591,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9625,7 +9625,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9659,7 +9659,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9693,7 +9693,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -9727,7 +9727,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9761,7 +9761,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9795,7 +9795,7 @@ "Platform": "Intel® Atom® x7425E 
CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9829,7 +9829,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9863,7 +9863,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9897,7 +9897,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9931,7 +9931,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9965,7 +9965,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -9999,7 +9999,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10033,7 +10033,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10067,7 +10067,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10101,7 +10101,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10135,7 +10135,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10169,7 +10169,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10203,7 +10203,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10237,7 +10237,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ 
-10271,7 +10271,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10305,7 +10305,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10339,7 +10339,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10373,7 +10373,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10407,7 +10407,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10441,7 +10441,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10475,7 +10475,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10509,7 +10509,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10543,7 +10543,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10577,7 +10577,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10611,7 +10611,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10645,7 +10645,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10679,7 +10679,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10713,7 +10713,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": 
"Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10747,7 +10747,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10781,7 +10781,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10917,7 +10917,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10951,7 +10951,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -10985,7 +10985,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11019,7 +11019,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11053,7 +11053,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11087,7 +11087,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11121,7 +11121,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11155,7 +11155,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11189,7 +11189,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11223,7 +11223,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11257,7 +11257,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11291,7 +11291,7 @@ "Platform": "Intel® 
Core™ i7-1355U Processor iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11325,7 +11325,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11359,7 +11359,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11393,7 +11393,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11427,7 +11427,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11461,7 +11461,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11495,7 +11495,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -11529,7 +11529,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -11563,7 +11563,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -11597,7 +11597,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11631,7 +11631,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11665,7 +11665,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11699,7 +11699,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11733,7 +11733,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": 
"Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11767,7 +11767,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11801,7 +11801,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11835,7 +11835,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -11869,7 +11869,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11903,7 +11903,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11937,7 +11937,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -11971,7 +11971,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12005,7 +12005,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12039,7 +12039,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12073,7 +12073,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12107,7 +12107,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12141,7 +12141,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12175,7 +12175,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12209,7 +12209,7 @@ 
"Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12243,7 +12243,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12277,7 +12277,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12311,7 +12311,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12345,7 +12345,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12379,7 +12379,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12413,7 +12413,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12447,7 +12447,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12481,7 +12481,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12515,7 +12515,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12549,7 +12549,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12583,7 +12583,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12617,7 +12617,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12651,7 +12651,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - 
"PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12685,7 +12685,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12719,7 +12719,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12753,7 +12753,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12787,7 +12787,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12821,7 +12821,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12855,7 +12855,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12889,7 +12889,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -12923,7 +12923,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13059,7 +13059,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13093,7 +13093,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13127,7 +13127,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13161,7 +13161,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13195,7 +13195,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms 
(Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13229,7 +13229,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13263,7 +13263,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13297,7 +13297,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13331,7 +13331,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13365,7 +13365,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13399,7 +13399,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13433,7 +13433,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13467,7 +13467,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13501,7 +13501,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13535,7 +13535,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -13569,7 +13569,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -13603,7 +13603,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -13637,7 +13637,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": 
"Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13671,7 +13671,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13705,7 +13705,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13739,7 +13739,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13773,7 +13773,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13807,7 +13807,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13841,7 +13841,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13875,7 +13875,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -13909,7 +13909,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13943,7 +13943,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -13977,7 +13977,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14011,7 +14011,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14045,7 +14045,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14079,7 +14079,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { 
"throughput": { "Precisions": [ @@ -14113,7 +14113,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14147,7 +14147,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14181,7 +14181,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14215,7 +14215,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14249,7 +14249,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14283,7 +14283,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14317,7 +14317,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14351,7 +14351,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14385,7 +14385,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14419,7 +14419,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14453,7 +14453,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14487,7 +14487,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14521,7 +14521,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ 
@@ -14555,7 +14555,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14589,7 +14589,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14623,7 +14623,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14657,7 +14657,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14691,7 +14691,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14725,7 +14725,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14759,7 +14759,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14793,7 +14793,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14827,7 +14827,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14861,7 +14861,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14895,7 +14895,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14929,7 +14929,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -14963,7 +14963,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ 
-15099,7 +15099,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15133,7 +15133,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15167,7 +15167,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15201,7 +15201,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15235,7 +15235,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15269,7 +15269,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15303,7 +15303,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15337,7 +15337,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15371,7 +15371,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15405,7 +15405,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15439,7 +15439,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15473,7 +15473,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15507,7 +15507,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": 
{ "Precisions": [ @@ -15541,7 +15541,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15575,7 +15575,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15609,7 +15609,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15643,7 +15643,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15677,7 +15677,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -15711,7 +15711,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -15745,7 +15745,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -15779,7 +15779,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15813,7 +15813,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15847,7 +15847,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15881,7 +15881,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15915,7 +15915,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -15949,7 +15949,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": 
{ "throughput": { "Precisions": [ @@ -15983,7 +15983,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16017,7 +16017,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -16051,7 +16051,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16085,7 +16085,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16119,7 +16119,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16153,7 +16153,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16187,7 +16187,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16221,7 +16221,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16255,7 +16255,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16289,7 +16289,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16323,7 +16323,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16357,7 +16357,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16391,7 +16391,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": 
{ "Precisions": [ @@ -16425,7 +16425,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16459,7 +16459,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16493,7 +16493,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16527,7 +16527,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16561,7 +16561,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16595,7 +16595,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16629,7 +16629,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16663,7 +16663,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16697,7 +16697,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16731,7 +16731,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16765,7 +16765,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16799,7 +16799,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16833,7 +16833,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16867,7 
+16867,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16901,7 +16901,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16935,7 +16935,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -16969,7 +16969,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17003,7 +17003,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17139,7 +17139,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17173,7 +17173,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17207,7 +17207,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V NPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17241,7 +17241,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17275,7 +17275,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17309,7 +17309,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17343,7 +17343,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17377,7 +17377,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { 
"throughput": { "Precisions": [ @@ -17411,7 +17411,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17445,7 +17445,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17479,7 +17479,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17513,7 +17513,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17547,7 +17547,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17581,7 +17581,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17615,7 +17615,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17649,7 +17649,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17683,7 +17683,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17717,7 +17717,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -17751,7 +17751,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17785,7 +17785,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17819,7 +17819,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, 
iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17853,7 +17853,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17887,7 +17887,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17921,7 +17921,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17955,7 +17955,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -17989,7 +17989,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18023,7 +18023,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18057,7 +18057,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18091,7 +18091,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18125,7 +18125,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18159,7 +18159,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18193,7 +18193,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18227,7 +18227,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18261,7 +18261,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18295,7 +18295,7 @@ "Platform": 
"Intel® Core™ i5-13600K CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18329,7 +18329,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18363,7 +18363,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18397,7 +18397,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18431,7 +18431,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18465,7 +18465,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18499,7 +18499,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18533,7 +18533,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18567,7 +18567,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18601,7 +18601,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18635,7 +18635,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18669,7 +18669,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18703,7 +18703,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18737,7 +18737,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ 
-18771,7 +18771,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18805,7 +18805,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18839,7 +18839,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18873,7 +18873,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18907,7 +18907,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18941,7 +18941,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -18975,7 +18975,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19009,7 +19009,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19145,7 +19145,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19179,7 +19179,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19213,7 +19213,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19247,7 +19247,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19281,7 +19281,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19315,7 +19315,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms 
(Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19349,7 +19349,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19383,7 +19383,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19417,7 +19417,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19451,7 +19451,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19485,7 +19485,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19519,7 +19519,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19553,7 +19553,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19587,7 +19587,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19621,7 +19621,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19655,7 +19655,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -19689,7 +19689,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -19723,7 +19723,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -19757,7 +19757,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19791,7 +19791,7 @@ "Platform": "Intel® Core™ 
i5-1335U Processor CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19825,7 +19825,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19859,7 +19859,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19893,7 +19893,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19927,7 +19927,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19961,7 +19961,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -19995,7 +19995,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -20029,7 +20029,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20063,7 +20063,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20097,7 +20097,7 @@ "Platform": "Intel® Atom® x7425E CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20131,7 +20131,7 @@ "Platform": "Intel® Atom® X6425E CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20165,7 +20165,7 @@ "Platform": "Intel® Celeron® 6305E CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20199,7 +20199,7 @@ "Platform": "Intel® Core™ i3-8100 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20233,7 +20233,7 @@ "Platform": "Intel® Core™ i5-10500TE CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { 
"throughput": { "Precisions": [ @@ -20267,7 +20267,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20301,7 +20301,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20335,7 +20335,7 @@ "Platform": "Intel® Core™ i5-13600K CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20369,7 +20369,7 @@ "Platform": "Intel® Core™ i5-8500 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20403,7 +20403,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20437,7 +20437,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20471,7 +20471,7 @@ "Platform": "Intel® Core™ i7-12700H CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20505,7 +20505,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20539,7 +20539,7 @@ "Platform": "Intel® Core™ i7-1360P CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20573,7 +20573,7 @@ "Platform": "Intel® Core™ i7-8700T CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20607,7 +20607,7 @@ "Platform": "Intel® Core™ i9-10900TE CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20641,7 +20641,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20675,7 +20675,7 @@ "Platform": "Intel® Processor N100 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20709,7 +20709,7 @@ "Platform": "Intel® Xeon® W1290P CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + 
"PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20743,7 +20743,7 @@ "Platform": "Intel® Xeon® E-2124G CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20777,7 +20777,7 @@ "Platform": "Intel® Xeon® Gold 5218T CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20811,7 +20811,7 @@ "Platform": "Intel® Xeon® Platinum 8280 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20845,7 +20845,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20879,7 +20879,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20913,7 +20913,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20947,7 +20947,7 @@ "Platform": "Intel® Xeon® Gold 6238L CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -20981,7 +20981,7 @@ "Platform": "Intel® Xeon® Silver 4316 CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21015,7 +21015,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21049,7 +21049,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21083,7 +21083,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V CPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21219,7 +21219,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H NPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21253,7 +21253,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H NPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21287,7 +21287,7 @@ "Platform": "Intel® Core™ 
Ultra 9 processor 288V NPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21321,7 +21321,7 @@ "Platform": "Intel® Atom® x7425E iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21355,7 +21355,7 @@ "Platform": "Intel® Atom® X6425E iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21389,7 +21389,7 @@ "Platform": "Intel® Celeron® 6305E iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21423,7 +21423,7 @@ "Platform": "Intel® Core™ i5-1235U Processor iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21457,7 +21457,7 @@ "Platform": "Intel® Core™ i5-1335U Processor iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21491,7 +21491,7 @@ "Platform": "Intel® Core™ i7-1185G7 iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21525,7 +21525,7 @@ "Platform": "Intel® Core™ i7-1185GRE iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21559,7 +21559,7 @@ "Platform": "Intel® Core™ i7-12700H iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21593,7 +21593,7 @@ "Platform": "Intel® Core™ i7-1355U Processor iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21627,7 +21627,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21661,7 +21661,7 @@ "Platform": "Intel® Processor N100 iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21695,7 +21695,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21729,7 +21729,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® 
Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21763,7 +21763,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21797,7 +21797,7 @@ "Platform": "Intel® Atom® x7425E CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -21831,7 +21831,7 @@ "Platform": "Intel® Atom® X6425E CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -21865,7 +21865,7 @@ "Platform": "Intel® Celeron® 6305E CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -21899,7 +21899,7 @@ "Platform": "Intel® Core™ i5-1235U Processor CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21933,7 +21933,7 @@ "Platform": "Intel® Core™ i5-1335U Processor CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -21967,7 +21967,7 @@ "Platform": "Intel® Core™ i7-1185G7 CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22001,7 +22001,7 @@ "Platform": "Intel® Core™ i7-1185GRE CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22035,7 +22035,7 @@ "Platform": "Intel® Core™ i7-12700H CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22069,7 +22069,7 @@ "Platform": "Intel® Core™ i7-1355U Processor CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22103,7 +22103,7 @@ "Platform": "Intel® Core™ i7-1360P CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22137,7 +22137,7 @@ "Platform": "Intel® Processor N100 CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Mobile Platforms (Intel® Atom™)", + "PlatformType": "Intel® Atom™, CPU+iGPU", "Parameters": { "throughput": { "Precisions": [ @@ -22171,7 +22171,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 155H CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22205,7 +22205,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU+iGPU", "Model": "yolo_v8n", 
"Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22239,7 +22239,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22273,7 +22273,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22307,7 +22307,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22341,7 +22341,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22375,7 +22375,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22511,7 +22511,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22545,7 +22545,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22579,7 +22579,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22613,7 +22613,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22647,7 +22647,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22681,7 +22681,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22715,7 +22715,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22749,7 +22749,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® 
Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22885,7 +22885,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22919,7 +22919,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22953,7 +22953,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -22987,7 +22987,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23021,7 +23021,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23055,7 +23055,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23089,7 +23089,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23123,7 +23123,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23225,7 +23225,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23259,7 +23259,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23293,7 +23293,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23327,7 +23327,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23361,7 +23361,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, 
CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23395,7 +23395,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23429,7 +23429,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23463,7 +23463,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23599,7 +23599,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23633,7 +23633,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23667,7 +23667,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23701,7 +23701,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23735,7 +23735,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23769,7 +23769,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23803,7 +23803,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23837,7 +23837,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -23973,7 +23973,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24007,7 +24007,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24041,7 
+24041,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "mistral-7b-v0.1", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24075,7 +24075,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24109,7 +24109,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24143,7 +24143,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24177,7 +24177,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24211,7 +24211,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H CPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24347,7 +24347,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24381,7 +24381,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 165H iGPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24415,7 +24415,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24449,7 +24449,7 @@ "Platform": "Intel® Core™ i9-13900K CPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24483,7 +24483,7 @@ "Platform": "Intel® Xeon® Platinum 8380 CPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24517,7 +24517,7 @@ "Platform": "Intel® Xeon® Platinum 8480+ CPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24551,7 +24551,7 @@ "Platform": "Intel® Xeon® Platinum 8580 CPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Server Platforms (Intel® Xeon®)", + "PlatformType": "Intel® Xeon®, CPU-only", "Parameters": { "throughput": 
{ "Precisions": [ @@ -24653,7 +24653,7 @@ "Platform": "Intel® Core™ i7-1360P iGPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24687,7 +24687,7 @@ "Platform": "Intel® Core™ Ultra 9 processor 288V iGPU-only", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24721,7 +24721,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24753,9 +24753,9 @@ }, { "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", - "Model": "bert-large-uncased", + "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24789,7 +24789,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24823,7 +24823,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24857,7 +24857,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24891,7 +24891,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24925,7 +24925,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24959,7 +24959,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -24993,7 +24993,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25027,7 +25027,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25061,7 +25061,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU-only", "Model": "yolo_v8n", "Checked": "true", - 
"PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, CPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25095,7 +25095,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25127,9 +25127,9 @@ }, { "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", - "Model": "bert-large-uncased", + "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25163,7 +25163,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25197,7 +25197,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25231,7 +25231,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25265,7 +25265,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25299,7 +25299,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25333,7 +25333,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25367,7 +25367,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25401,7 +25401,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25435,7 +25435,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25469,7 +25469,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25501,9 +25501,9 @@ }, { "Platform": "Intel® 
Core™ Ultra 7 processor 268V CPU+iGPU", - "Model": "bert-large-uncased", + "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25537,7 +25537,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "efficientdet-d0", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25571,7 +25571,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25605,7 +25605,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25639,7 +25639,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25673,7 +25673,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25707,7 +25707,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25741,7 +25741,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25775,7 +25775,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25809,7 +25809,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V CPU+iGPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25843,7 +25843,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "bert-base-cased", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25875,9 +25875,9 @@ }, { "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", - "Model": "bert-large-uncased", + "Model": "bert-large-uncased-whole-word-masking-squad-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25911,7 +25911,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "efficientdet-d0", "Checked": "true", - 
"PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25945,7 +25945,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "mask_rcnn_resnet50_atrous_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -25979,7 +25979,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "mobilenet-v2", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26013,7 +26013,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "resnet-50", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26047,7 +26047,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "ssd_mobilenet_v1_coco", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26081,7 +26081,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "ssd-resnet34-1200", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26115,7 +26115,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "unet-camvid-onnx-0001", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26149,7 +26149,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "yolo_v5m", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26183,7 +26183,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V NPU", "Model": "yolo_v8n", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, NPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26217,7 +26217,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "chatglm2-6b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26251,7 +26251,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "falcon-7b-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26283,9 +26283,9 @@ }, { "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", - "Model": "llama-2-7b-chat-hf", + "Model": "llama-2-7b-chat", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26319,7 +26319,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "llama-3-8b", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26353,7 +26353,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "mistral-7b-v0.1", 
"Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26387,7 +26387,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "phi-3-mini-4k-instruct", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ @@ -26421,7 +26421,7 @@ "Platform": "Intel® Core™ Ultra 7 processor 268V iGPU", "Model": "stable-diffusion-v1-5", "Checked": "true", - "PlatformType": "Client Platforms (Intel® Core™)", + "PlatformType": "Intel® Core™, iGPU-only", "Parameters": { "throughput": { "Precisions": [ diff --git a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json index 6fb8d19e1a1adf..9cd8176af26235 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json +++ b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json @@ -80,7 +80,10 @@ "platformTypes": { "name": "ietype", "data": [ - "Client Platforms (Intel® Core™)" + "Intel® Core™, CPU-only", + "Intel® Core™, iGPU-only", + "Intel® Core™, NPU-only", + "Intel® Core™, CPU+iGPU" ] }, "platforms": { @@ -97,7 +100,10 @@ "models": { "name": "networkmodel", "data": [ - "bert-base-cased" + "bert-base-cased", + "resnet-50", + "yolo_v8n", + "llama-3-8B" ] }, "parameters": { diff --git a/docs/sphinx_setup/_static/download/supported_models.csv b/docs/sphinx_setup/_static/download/supported_models.csv index 1c7db3ab6bfc9d..87ea37b0f207c3 100644 --- a/docs/sphinx_setup/_static/download/supported_models.csv +++ b/docs/sphinx_setup/_static/download/supported_models.csv @@ -344,8 +344,7 @@ mask_rcnn_resnet101_atrous_coco,Instance Segmentation,tf,FP16-INT8,+,+, mask_rcnn_resnet50_atrous_coco,Instance Segmentation,tf,FP16,+,+, mask_rcnn_resnet50_atrous_coco,Instance Segmentation,tf,FP16-INT8,+,+, mask_rcnn_resnet50_atrous_coco,Instance Segmentation,tf,FP32,+,+, -MaskRCNN-12,Object Detection,onnx,FP16,,,+ -MaskRCNN-12,Object Detection,onnx,FP32,+,+,+ +MaskRCNN-12,Object Detection,onnx,FP32,+,+, mbart-large-50-many-to-one-mmt,Natural Language Processing,pytorch,intel-optimum default,,+, Meta-Llama-3-8B,Large Language Model,pytorch,intel-optimum default,,+, Meta-Llama-3-8B-Instruct,Large Language Model,pytorch,intel-optimum default,,+, diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index 47dc76bf16cf29..9d376877b51d08 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -124,7 +124,7 @@ Places to Begin Cloud-ready deployments for microservice applications. - .. button-link:: openvino-workflow/openvino-workflow/model-server/ovms_what_is_openvino_model_server.html + .. 
button-link:: openvino-workflow/model-server/ovms_what_is_openvino_model_server.html :color: primary :outline: From 704de6e74443f28b53f4ca755240fd425ab2830a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Krzemi=C5=84ski?= Date: Fri, 8 Nov 2024 12:03:59 +0100 Subject: [PATCH 028/182] [Opset] Opset16 initialization (#27458) ### Details: - Initialization of OV Opset16 ### Tickets: - 156876, Finalization 156877 --- docs/sphinx_setup/api/ie_python_api/api.rst | 6 ++++++ .../src/openvino/runtime/opset16/__init__.py | 8 ++++++++ .../python/src/openvino/runtime/opset16/ops.py | 12 ++++++++++++ .../test_transformations/test_pattern_ops.py | 2 +- src/core/include/openvino/opsets/opset.hpp | 5 +++++ src/core/include/openvino/opsets/opset16.hpp | 15 +++++++++++++++ src/core/include/openvino/opsets/opset16_tbl.hpp | 16 ++++++++++++++++ src/core/src/opsets/opset.cpp | 14 +++++++++++++- src/core/tests/op.cpp | 1 + src/core/tests/opset.cpp | 4 +++- .../include/openvino/frontend/extension/op.hpp | 2 +- src/plugins/template/src/plugin.cpp | 1 + .../src/op_impl_check/single_op_graph.cpp | 1 + 13 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 src/bindings/python/src/openvino/runtime/opset16/__init__.py create mode 100644 src/bindings/python/src/openvino/runtime/opset16/ops.py create mode 100644 src/core/include/openvino/opsets/opset16.hpp create mode 100644 src/core/include/openvino/opsets/opset16_tbl.hpp diff --git a/docs/sphinx_setup/api/ie_python_api/api.rst b/docs/sphinx_setup/api/ie_python_api/api.rst index efbe25ee40bcde..6fc754da1246b7 100644 --- a/docs/sphinx_setup/api/ie_python_api/api.rst +++ b/docs/sphinx_setup/api/ie_python_api/api.rst @@ -119,6 +119,12 @@ OpenVINO Python API openvino.runtime.opset15 +.. autosummary:: + :toctree: _autosummary + :template: custom-module-template.rst + + openvino.runtime.opset16 + .. 
autosummary:: :toctree: _autosummary :template: custom-module-template.rst diff --git a/src/bindings/python/src/openvino/runtime/opset16/__init__.py b/src/bindings/python/src/openvino/runtime/opset16/__init__.py new file mode 100644 index 00000000000000..bf155b81312090 --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset16/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# New operations added in Opset16 + +# Operators from previous opsets +# TODO (ticket: 156877): Add previous opset operators at the end of opset16 development diff --git a/src/bindings/python/src/openvino/runtime/opset16/ops.py b/src/bindings/python/src/openvino/runtime/opset16/ops.py new file mode 100644 index 00000000000000..0825ccb9d0f487 --- /dev/null +++ b/src/bindings/python/src/openvino/runtime/opset16/ops.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +"""Factory functions for ops added to openvino opset16.""" +from functools import partial + +from openvino.runtime.opset_utils import _get_node_factory + +_get_node_factory_opset16 = partial(_get_node_factory, "opset16") + +# -------------------------------------------- ops ------------------------------------------------ diff --git a/src/bindings/python/tests/test_transformations/test_pattern_ops.py b/src/bindings/python/tests/test_transformations/test_pattern_ops.py index 041bf83764e265..24b28061582c68 100644 --- a/src/bindings/python/tests/test_transformations/test_pattern_ops.py +++ b/src/bindings/python/tests/test_transformations/test_pattern_ops.py @@ -189,7 +189,7 @@ def test_pattern_optional_root(): def test_wrap_type_pattern_type(): - last_opset_number = 15 + last_opset_number = 16 for i in range(1, last_opset_number + 1): WrapType(f"opset{i}.Parameter") WrapType(f"opset{i}::Parameter") diff --git a/src/core/include/openvino/opsets/opset.hpp b/src/core/include/openvino/opsets/opset.hpp index 7ab8c43fcec6ab..524729793f0569 100644 --- a/src/core/include/openvino/opsets/opset.hpp +++ b/src/core/include/openvino/opsets/opset.hpp @@ -172,6 +172,11 @@ const OPENVINO_API OpSet& get_opset14(); * @ingroup ov_opset_cpp_api */ const OPENVINO_API OpSet& get_opset15(); +/** + * @brief Returns map of available opsets + * @ingroup ov_opset_cpp_api + */ +const OPENVINO_API OpSet& get_opset16(); /** * @brief Returns map of available opsets * @ingroup ov_opset_cpp_api diff --git a/src/core/include/openvino/opsets/opset16.hpp b/src/core/include/openvino/opsets/opset16.hpp new file mode 100644 index 00000000000000..0e2c6c5452f661 --- /dev/null +++ b/src/core/include/openvino/opsets/opset16.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/ops.hpp" + +namespace ov { +namespace opset16 { +#define _OPENVINO_OP_REG(a, b) using b::a; +#include "openvino/opsets/opset16_tbl.hpp" +#undef _OPENVINO_OP_REG +} // namespace opset16 +} // namespace ov diff --git a/src/core/include/openvino/opsets/opset16_tbl.hpp b/src/core/include/openvino/opsets/opset16_tbl.hpp new file mode 100644 index 00000000000000..82aa9629263e2a --- /dev/null +++ b/src/core/include/openvino/opsets/opset16_tbl.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#ifndef _OPENVINO_OP_REG +# warning "_OPENVINO_OP_REG not defined" +# define _OPENVINO_OP_REG(x, y) +#endif + 
+// Previous opsets operators +// TODO (ticket: 156877): Add remaining operators from opset15 at the end of opset16 development +_OPENVINO_OP_REG(Parameter, ov::op::v0) +_OPENVINO_OP_REG(Convert, ov::op::v0) +_OPENVINO_OP_REG(ShapeOf, ov::op::v3) + +// New operations added in opset16 diff --git a/src/core/src/opsets/opset.cpp b/src/core/src/opsets/opset.cpp index ca219f1c68ecd3..f2490010e9dc50 100644 --- a/src/core/src/opsets/opset.cpp +++ b/src/core/src/opsets/opset.cpp @@ -113,7 +113,8 @@ const std::map>& ov::get_availabl _OPENVINO_REG_OPSET(opset12), _OPENVINO_REG_OPSET(opset13), _OPENVINO_REG_OPSET(opset14), - _OPENVINO_REG_OPSET(opset15)}; + _OPENVINO_REG_OPSET(opset15), + _OPENVINO_REG_OPSET(opset16)}; #undef _OPENVINO_REG_OPSET return opset_map; } @@ -288,3 +289,14 @@ const ov::OpSet& ov::get_opset15() { }); return opset; } + +const ov::OpSet& ov::get_opset16() { + static OpSet opset; + static std::once_flag flag; + std::call_once(flag, [&]() { +#define _OPENVINO_OP_REG(NAME, NAMESPACE) opset.insert(); +#include "openvino/opsets/opset16_tbl.hpp" +#undef _OPENVINO_OP_REG + }); + return opset; +} diff --git a/src/core/tests/op.cpp b/src/core/tests/op.cpp index cf65eecc16cd4c..08fa7a97584e41 100644 --- a/src/core/tests/op.cpp +++ b/src/core/tests/op.cpp @@ -67,4 +67,5 @@ TEST(op, opset_multi_thread) { doTest(ov::get_opset13); doTest(ov::get_opset14); doTest(ov::get_opset15); + doTest(ov::get_opset16); } diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp index 65c79dc9432439..d667de21263f5a 100644 --- a/src/core/tests/opset.cpp +++ b/src/core/tests/opset.cpp @@ -14,6 +14,7 @@ #include "openvino/opsets/opset13.hpp" #include "openvino/opsets/opset14.hpp" #include "openvino/opsets/opset15.hpp" +#include "openvino/opsets/opset16.hpp" #include "openvino/opsets/opset2.hpp" #include "openvino/opsets/opset3.hpp" #include "openvino/opsets/opset4.hpp" @@ -75,7 +76,8 @@ INSTANTIATE_TEST_SUITE_P(opset, OpsetTestParams{ov::get_opset12, 178}, OpsetTestParams{ov::get_opset13, 186}, OpsetTestParams{ov::get_opset14, 188}, - OpsetTestParams{ov::get_opset15, 199}), + OpsetTestParams{ov::get_opset15, 199}, + OpsetTestParams{ov::get_opset16, 3}), OpsetTestNameGenerator{}); class MyOpOld : public ov::op::Op { diff --git a/src/frontends/common/include/openvino/frontend/extension/op.hpp b/src/frontends/common/include/openvino/frontend/extension/op.hpp index 55fa919447406a..9022b9f801f800 100644 --- a/src/frontends/common/include/openvino/frontend/extension/op.hpp +++ b/src/frontends/common/include/openvino/frontend/extension/op.hpp @@ -25,7 +25,7 @@ inline const ov::OpSet& get_opset_by_name(const std::string& opset_name) { if (opsets.find(opset_name) != opsets.end()) return opsets.at(opset_name)(); if (opset_name.empty() || opset_name == "latest") { - return ov::get_opset15(); + return ov::get_opset15(); // TODO (ticket: 156877): Update to 16 at the end of opset16 development } else { FRONT_END_GENERAL_CHECK(false, "Unsupported opset name: ", opset_name); } diff --git a/src/plugins/template/src/plugin.cpp b/src/plugins/template/src/plugin.cpp index ee885f67e188b5..f66df99c7b1c43 100644 --- a/src/plugins/template/src/plugin.cpp +++ b/src/plugins/template/src/plugin.cpp @@ -221,6 +221,7 @@ ov::SupportedOpsMap ov::template_plugin::Plugin::query_model(const std::shared_p #include "openvino/opsets/opset13_tbl.hpp" #include "openvino/opsets/opset14_tbl.hpp" #include "openvino/opsets/opset15_tbl.hpp" +#include "openvino/opsets/opset16_tbl.hpp" // clang-format on #undef _OPENVINO_OP_REG return 
op_super_set.contains_type(node->get_type_info()); diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp index fb192e0ac40223..e311f532a2497b 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/single_op_graph.cpp @@ -2189,6 +2189,7 @@ OpGenerator getOpGeneratorMap() { #include "openvino/opsets/opset13_tbl.hpp" #include "openvino/opsets/opset14_tbl.hpp" #include "openvino/opsets/opset15_tbl.hpp" +#include "openvino/opsets/opset16_tbl.hpp" #undef _OPENVINO_OP_REG }; return opGeneratorMap; From 3826706b881ac46aade97fceda78cad5b3f33cc2 Mon Sep 17 00:00:00 2001 From: Andrzej Kopytko Date: Fri, 8 Nov 2024 13:16:56 +0100 Subject: [PATCH 029/182] Docs: new benchmark graph with two Y axes (#27480) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- .../about-openvino/performance-benchmarks.rst | 6 +- .../data/graph-data-ovms-genai.json | 96 ++ .../benchmarks_files/graph-config.json | 41 +- docs/sphinx_setup/_static/html/modalLLM.html | 95 ++ docs/sphinx_setup/_static/js/graphs.js | 179 +--- docs/sphinx_setup/_static/js/graphsLLM.js | 889 ++++++++++++++++++ docs/sphinx_setup/conf.py | 1 + 7 files changed, 1121 insertions(+), 186 deletions(-) create mode 100644 docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ovms-genai.json create mode 100644 docs/sphinx_setup/_static/html/modalLLM.html create mode 100644 docs/sphinx_setup/_static/js/graphsLLM.js diff --git a/docs/articles_en/about-openvino/performance-benchmarks.rst b/docs/articles_en/about-openvino/performance-benchmarks.rst index ed9d39aaf8b9e6..75d1882b8cee89 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks.rst @@ -13,7 +13,7 @@ Performance Benchmarks Efficient LLMs for AI PC Performance Information F.A.Q. OpenVINO Accuracy - Getting Performance Numbers + Getting Performance Numbers This page presents benchmark results for the @@ -59,12 +59,12 @@ implemented in your solutions. Click the buttons below to see the chosen benchma .. grid-item:: .. 
button-link:: # - :class: ovms-toolkit-benchmark-llm + :class: ovms-toolkit-benchmark-llm-result :color: primary :outline: :expand: - :material-regular:`bar_chart;1.4em` OVMS for GenAI (coming soon) + :material-regular:`bar_chart;1.4em` OVMS for GenAI (incoming) diff --git a/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ovms-genai.json b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ovms-genai.json new file mode 100644 index 00000000000000..f96fb11e6b029d --- /dev/null +++ b/docs/sphinx_setup/_static/benchmarks_files/data/graph-data-ovms-genai.json @@ -0,0 +1,96 @@ +[ + { + "Platform": "Intel® Xeon® Platinum 8580", + "Model": "mistralai/Mistral-7B-v0.1", + "PlatformType": "None", + "Parameters": { + "Vllm": { + "Precisions": [ + { + "Throughput": { + "0.2": "350.06", + "0.6": "486.89", + "0.8": "575.92", + "2.0": "778.07" + } + }, + { + "Latency": { + "0.2": "60.93", + "0.6": "91.63", + "0.8": "113.61", + "2.0": "240.25" + } + } + ] + }, + "Ovms": { + "Precisions": [ + { + "Throughput": { + "0.2": "90.98", + "0.6": "266.24", + "0.8": "351.63", + "2.0": "195.16" + } + }, + { + "Latency": { + "0.2": "54.9", + "0.6": "78.78", + "0.8": "95.78", + "2.0": "352.23" + } + } + ] + } + } + }, + { + "Platform": "Intel® Xeon® Platinum 8530", + "Model": "mistralai/Mistral-7B-v0.1", + "PlatformType": "None", + "Parameters": { + "Vllm": { + "Precisions": [ + { + "Throughput": { + "0.2": "350.06", + "0.6": "486.89", + "0.8": "575.92", + "2.0": "778.07" + } + }, + { + "Latency": { + "0.2": "60.93", + "0.6": "91.63", + "0.8": "113.61", + "2.0": "240.25" + } + } + ] + }, + "Ovms": { + "Precisions": [ + { + "Throughput": { + "0.2": "90.98", + "0.6": "266.24", + "0.8": "351.63", + "2.0": "195.16" + } + }, + { + "Latency": { + "0.2": "54.9", + "0.6": "78.78", + "0.8": "95.78", + "2.0": "352.23" + } + } + ] + } + } + } +] \ No newline at end of file diff --git a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json index 9cd8176af26235..29fbe714094b74 100644 --- a/docs/sphinx_setup/_static/benchmarks_files/graph-config.json +++ b/docs/sphinx_setup/_static/benchmarks_files/graph-config.json @@ -1,4 +1,3 @@ - { "PrecisionsMap": { "INT4": "int4", @@ -9,7 +8,9 @@ "FP32_OV": "fp32_ov", "FP32_OVMS": "fp32_ovms", "INT8_OV": "int8_ov", - "INT8_OVMS": "int8_ovms" + "INT8_OVMS": "int8_ovms", + "THROUGHPUT": "Throughput", + "LATENCY": "Latency" }, "ParametersMap": { "Throughput": "throughput", @@ -71,6 +72,30 @@ "data": null, "color": "#00536a", "label": "INT8 OVMS" + }, + "Vllm": { + "Latency": { + "data": null, + "color": "#FDB2BC", + "label": "Latency" + }, + "Throughput": { + "data": null, + "color": "#91CDF7", + "label": "Throughput" + } + }, + "Ovms": { + "Latency": { + "data": null, + "color": "#CDB2BC", + "label": "Latency" + }, + "Throughput": { + "data": null, + "color": "#12CDF7", + "label": "Throughput" + } } }, "Filters": [ @@ -80,6 +105,7 @@ "platformTypes": { "name": "ietype", "data": [ + "None", "Intel® Core™, CPU-only", "Intel® Core™, iGPU-only", "Intel® Core™, NPU-only", @@ -88,8 +114,7 @@ }, "platforms": { "name": "platform", - "data": [ - ] + "data": [] }, "platformFilters": { "name": "coretype", @@ -109,15 +134,15 @@ "parameters": { "name": "parameters", "data": [ - "Throughput", - "Latency" + "Vllm", + "Ovms" ] }, "precision": { "name": "precision", "data": [ - "INT4", - "INT8" + "THROUGHPUT", + "LATENCY" ] } } diff --git a/docs/sphinx_setup/_static/html/modalLLM.html 
b/docs/sphinx_setup/_static/html/modalLLM.html new file mode 100644 index 00000000000000..e3395a16931188 --- /dev/null +++ b/docs/sphinx_setup/_static/html/modalLLM.html @@ -0,0 +1,95 @@ +
\ No newline at end of file diff --git a/docs/sphinx_setup/_static/js/graphs.js b/docs/sphinx_setup/_static/js/graphs.js index f8146c1580c58e..4d621ce0780261 100644 --- a/docs/sphinx_setup/_static/js/graphs.js +++ b/docs/sphinx_setup/_static/js/graphs.js @@ -1,173 +1,4 @@ -// =================== GENERAL OUTPUT CONFIG ========================= - -class Filter { - - // param: GraphData[], networkModels[] - static FilterByNetworkModel(graphDataArr, networkModels) { - const optionMap = new Map(); - networkModels.map((model) => graphDataArr.filter((graphData => graphData.Model === model))) - .flat(1) - .forEach(item => optionMap.set(item.Platform, item)); - return Array.from(optionMap.values()); - } - - // param: GraphData[], ieType - static ByIeTypes(graphDataArr, ieTypes) { - const optionMap = new Map(); - graphDataArr - .filter(graphData => ieTypes.includes(graphData.PlatformType)) - .forEach(item => optionMap.set(item.Platform, item)); - return Array.from(optionMap.values()); - } - - // param: GraphData[], ieType, networkModels - static ByTypesAndModels(graphDataArr, ieTypes, models) { - const optionMap = new Map(); - graphDataArr - .filter(graphData => ieTypes.includes(graphData.PlatformType)) - .filter(graphData => models.includes(graphData.Model)) - .forEach(item => optionMap.set(item.Platform, item)); - return Array.from(optionMap.values()); - } - - // param: GraphData[], clientPlatforms - static ByIeKpis(graphDataArr, clientPlatforms) { - var kpis = [] - clientPlatforms.forEach((platformName) => { - graphDataArr.filter((data) => { - if (data.Platform.includes(platformName)) { - for (var key in data.Parameters) { - if (!kpis.includes(key)) kpis.push(key) - } - } - }) - }) - return kpis; - } - - // param: GraphData[] - static getParameters(graphDataArr) { - var parameters = [] - graphDataArr.filter((data) => { - for (var key in data.Parameters) { - if (!parameters.includes(Graph.capitalizeFirstLetter(key))) parameters.push(Graph.capitalizeFirstLetter(key)) - } - }) - return parameters; - } - - // param: GraphData[] - static getIeTypes(graphDataArr) { - var kpis = [] - graphDataArr.filter((data) => { - for (var key in data.Parameters) { - if (!kpis.includes(Graph.capitalizeFirstLetter(key))) kpis.push(Graph.capitalizeFirstLetter(key)) - } - }) - return kpis; - } - - // param: GraphData[], clientPlatforms[] - static ByClientPlatforms(graphDataArr, platformsArr) { - return graphDataArr.filter((data) => { - return platformsArr.includes(data.Platform) - }); - } - - // param: GraphData[], coreTypes[] - static FilterByCoreTypes(graphDataArr, coreTypes) { - if (coreTypes) { - return graphDataArr.filter((data) => coreTypes.includes(data.PlatformType)); - } - return graphDataArr; - } -} - -class Modal { - static getPrecisionsLabels(graphDataArr) { - var kpis = [] - graphDataArr.filter((data) => { - for (var key in data.Parameters) { - data.Parameters[key].Precisions.forEach((key) => { - Object.keys(key).forEach((key) => { - if (!kpis.includes(key.toUpperCase())) kpis.push(key.toUpperCase()) - }); - }) - } - }) - return kpis; - } - - static getPrecisions(appConfig, labels) { - return labels.map((label) => { - var prec = appConfig.PrecisionsMap[label]; - if (prec !== undefined) { - return prec; - } - else { - return "no name"; - } - }); - } -} - - -class Graph { - // functions to get unique keys - static getNetworkModels(graphDataArr) { - return Array.from(new Set(graphDataArr.map((obj) => obj.Model))); - } - static getIeTypes(graphDataArr) { - return Array.from(new Set(graphDataArr.map((obj) => 
obj.PlatformType))); - } - static getCoreTypes(graphDataArr) { - return Array.from(new Set(graphDataArr.map((obj) => obj.ieType))); - } - - // param: GraphData[] - static getPlatformNames(graphDataArr) { - return graphDataArr.map((data) => data.Platform); - } - - // param: GraphData[], parameterName: string, precisions: list - static getDatabyParameter(graphDataArr, parameterName, precisions) { - var array = []; - graphDataArr.forEach((item) => { - if (item.Parameters[parameterName] !== undefined) { - array.push(item.Parameters[parameterName].Precisions); - } - else { - var obj = {}; - precisions.forEach((prec) => { - obj[prec] = 0; - }) - array.push([obj]) - } - }) - return array; - - } - - // this returns an object that is used to ender the chart - static getGraphConfig(parameterName, item, precisions, appConfig) { - return { - chartTitle: Graph.capitalizeFirstLetter(parameterName), - iconClass: parameterName + '-icon', - unit: item.Parameters[parameterName]?.Unit, - datasets: precisions.map((precision) => appConfig.PrecisionData[precision]), - }; - } - static capitalizeFirstLetter(string) { - return string.charAt(0).toUpperCase() + string.slice(1); - } -} - -class ChartDisplay { - constructor(mode, numberOfCharts) { - this.mode = mode; - this.numberOfChartsInRow = numberOfCharts; - } -} +// =================== ADDITIONAL OUTPUT CONFIG ========================= $(document).ready(function () { @@ -186,7 +17,6 @@ $(document).ready(function () { $('.graph-chart-title-header').off('click').on('click', (event) => { var parent = event.target.parentElement; - if ($(parent).children('.chart-wrap,.empty-chart-container').is(":visible")) { $(parent).children('.chart-wrap,.empty-chart-container').hide(); $(parent).children('.chevron-right-btn').show(); @@ -702,11 +532,10 @@ $(document).ready(function () { chartContainer.append(chartWrap); var graphConfigs = parameters.map((parameter) => { var groupUnit = model[0]; - var kpiData = Graph.getDatabyParameter(model, appConfig.ParametersMap[parameter], precisions); - var config = Graph.getGraphConfig(appConfig.ParametersMap[parameter], groupUnit, precisions, JSON.parse(JSON.stringify(appConfig))); + var kpiData = Graph.getDatabyParameterOld(model, appConfig.ParametersMap[parameter], precisions); + var config = Graph.getGraphConfigOld(appConfig.ParametersMap[parameter], groupUnit, precisions, JSON.parse(JSON.stringify(appConfig))); precisions.forEach((precision, index) => { - config.datasets[index].data = kpiData.map(tData => tData[0][precision] - ); + config.datasets[index].data = kpiData.map(tData => tData[0][precision]); }); return config; }); diff --git a/docs/sphinx_setup/_static/js/graphsLLM.js b/docs/sphinx_setup/_static/js/graphsLLM.js new file mode 100644 index 00000000000000..4dbc0313e2a133 --- /dev/null +++ b/docs/sphinx_setup/_static/js/graphsLLM.js @@ -0,0 +1,889 @@ +// =================== GENERAL OUTPUT CONFIG ========================= + +class Filter { + + // param: GraphData[], networkModels[] + static FilterByNetworkModel(graphDataArr, networkModels) { + const optionMap = new Map(); + networkModels.map((model) => graphDataArr.filter((graphData => graphData.Model === model))) + .flat(1) + .forEach(item => optionMap.set(item.Platform, item)); + return Array.from(optionMap.values()); + } + + // param: GraphData[], ieType + static ByIeTypes(graphDataArr, ieTypes) { + const optionMap = new Map(); + graphDataArr + .filter(graphData => ieTypes.includes(graphData.PlatformType)) + .forEach(item => optionMap.set(item.Platform, item)); + return 
Array.from(optionMap.values()); + } + + // param: GraphData[], ieType, networkModels + static ByTypesAndModels(graphDataArr, ieTypes, models) { + const optionMap = new Map(); + graphDataArr + .filter(graphData => ieTypes.includes(graphData.PlatformType)) + .filter(graphData => models.includes(graphData.Model)) + .forEach(item => optionMap.set(item.Platform, item)); + return Array.from(optionMap.values()); + } + + // param: GraphData[], clientPlatforms + static ByIeKpis(graphDataArr, clientPlatforms) { + var kpis = [] + clientPlatforms.forEach((platformName) => { + graphDataArr.filter((data) => { + if (data.Platform.includes(platformName)) { + for (var key in data.Parameters) { + if (!kpis.includes(key)) kpis.push(key) + } + } + }) + }) + return kpis; + } + + // param: GraphData[] + static getParameters(graphDataArr) { + var parameters = [] + graphDataArr.filter((data) => { + for (var key in data.Parameters) { + if (!parameters.includes(Graph.capitalizeFirstLetter(key))) parameters.push(Graph.capitalizeFirstLetter(key)) + } + }) + return parameters; + } + + // param: GraphData[] + static getIeTypes(graphDataArr) { + var kpis = [] + graphDataArr.filter((data) => { + for (var key in data.Parameters) { + if (!kpis.includes(Graph.capitalizeFirstLetter(key))) kpis.push(Graph.capitalizeFirstLetter(key)) + } + }) + return kpis; + } + + // param: GraphData[], clientPlatforms[] + static ByClientPlatforms(graphDataArr, platformsArr) { + return graphDataArr.filter((data) => { + return platformsArr.includes(data.Platform) + }); + } + + // param: GraphData[], coreTypes[] + static FilterByCoreTypes(graphDataArr, coreTypes) { + if (coreTypes) { + return graphDataArr.filter((data) => coreTypes.includes(data.PlatformType)); + } + return graphDataArr; + } +} + +class Modal { + static getPrecisionsLabels(graphDataArr) { + var kpis = [] + graphDataArr.filter((data) => { + for (var key in data.Parameters) { + data.Parameters[key].Precisions.forEach((key) => { + Object.keys(key).forEach((key) => { + if (!kpis.includes(key.toUpperCase())) kpis.push(key.toUpperCase()) + }); + }) + } + }) + return kpis; + } + + static getPrecisions(appConfig, labels) { + return labels.map((label) => { + var prec = appConfig.PrecisionsMap[label]; + if (prec !== undefined) { + return prec; + } + else { + return "no name"; + } + }); + } +} + +class Graph { + // functions to get unique keys + static getNetworkModels(graphDataArr) { + return Array.from(new Set(graphDataArr.map((obj) => obj.Model))); + } + static getIeTypes(graphDataArr) { + return Array.from(new Set(graphDataArr.map((obj) => obj.PlatformType))); + } + static getCoreTypes(graphDataArr) { + return Array.from(new Set(graphDataArr.map((obj) => obj.ieType))); + } + + // param: GraphData[] + static getPlatformNames(graphDataArr) { + return graphDataArr.map((data) => data.Platform); + } + + // param: GraphData[], engine: string, precisions: list + static getDatabyParameter(graphDataArr, engine, array) { + if (!Array.isArray(array[engine])) { + array[engine] = []; + } + array[engine].push(graphDataArr.Parameters[engine].Precisions); + return array; + } + + // this returns an object that is used to ender the chart + static getGraphConfig(engine, precisions, appConfig) { + return { + chartTitle: 'Throughput vs Latency', + iconClass: 'latency-icon', + datasets: precisions.map((precision) => appConfig.PrecisionData[engine][precision]), + unit: "None" + }; + } + // param: GraphData[], parameterName: string, precisions: list + static getDatabyParameterOld(graphDataArr, parameterName, 
precisions) { + var array = []; + graphDataArr.forEach((item) => { + if (item.Parameters[parameterName] !== undefined) { + array.push(item.Parameters[parameterName].Precisions); + } + else { + var obj = {}; + precisions.forEach((prec) => { + obj[prec] = 0; + }) + array.push([obj]) + } + }) + return array; + + } + + // this returns an object that is used to ender the chart + static getGraphConfigOld(parameterName, item, precisions, appConfig) { + return { + chartTitle: Graph.capitalizeFirstLetter(parameterName), + iconClass: parameterName + '-icon', + unit: item.Parameters[parameterName]?.Unit, + datasets: precisions.map((precision) => appConfig.PrecisionData[precision]), + }; + } + static capitalizeFirstLetter(string) { + return string.charAt(0).toUpperCase() + string.slice(1); + } +} + +class ChartDisplay { + constructor(mode, numberOfCharts) { + this.mode = mode; + this.numberOfChartsInRow = numberOfCharts; + } +} + +$(document).ready(function () { + $('.ovms-toolkit-benchmark-llm-result').on('click', () => showModal("graph-data-ovms-genai.json")); + function clickBuildGraphsLLM(graph, appConfig, networkModels, ieTypes, platforms, kpis, precisions) { + renderData(graph, appConfig, networkModels, ieTypes, platforms, kpis, precisions); + $('.modal-footer').show(); + $('#modal-display-graphs').show(); + $('.edit-settings-btn').off('click').on('click', (event) => { + $('#modal-configure-graphs').show(); + $('#modal-display-graphs').hide(); + $('.modal-footer').hide(); + $('.chart-placeholder').empty(); + }); + + $('.graph-chart-title-header').off('click').on('click', (event) => { + var parent = event.target.parentElement; + + if ($(parent).children('.chart-wrap,.empty-chart-container').is(":visible")) { + $(parent).children('.chart-wrap,.empty-chart-container').hide(); + $(parent).children('.chevron-right-btn').show(); + $(parent).children('.chevron-down-btn').hide(); + } else { + $(parent).children('.chart-wrap,.empty-chart-container').show(); + $(parent).children('.chevron-down-btn').show(); + $(parent).children('.chevron-right-btn').hide(); + } + }); + } + + function hideModal() { + $('#graphModal').remove(); + $('body').css('overflow', 'auto'); + } + + function showModal(file) { + $('body').css('overflow', 'hidden'); + + fetch('../_static/benchmarks_files/data/' + file) + .then((response) => response.json()) + .then((jsonData) => { + fetch('../_static/benchmarks_files/graph-config.json') + .then((configResponse) => configResponse.json()) + .then((appConfig) => { + renderModal(jsonData, appConfig) + }) + }); + } + + function getSelectedNetworkModels() { + return $('.models-column input:checked, .platforms-column input:checked').not('[data-networkmodel="Select All"]').map(function () { + return $(this).data('networkmodel'); + }).get(); + } + + function getSelectedIeTypes() { + return $('.ietype-column input:checked').map(function () { + return $(this).data('ietype'); + }).get(); + } + + function getSelectedClientPlatforms() { + return $('.platforms-column input:checked').map(function () { + return $(this).data('platform'); + }).get(); + } + + function getSelectedKpis() { + return $('.kpi-column input:checked').map(function () { + return $(this).data('kpi'); + }).get(); + } + + function getSelectedPrecisions() { + return $('.precisions-column input:checked').map(function () { + return $(this).data('precision'); + }).get(); + } + + function validateSelections() { + if (getSelectedNetworkModels().length > 0 + && getSelectedIeTypes() + && getSelectedClientPlatforms().length > 0 + && 
getSelectedKpis().length > 0) { + if (getSelectedPrecisions().length > 0) { + $('#build-graphs-btn').prop('disabled', false); + return; + } + $('#build-graphs-btn').prop('disabled', true); + return; + } + $('#build-graphs-btn').prop('disabled', true); + } + + function renderModal(graph, appConfig) { + new Graph(graph); + var networkModels = Graph.getNetworkModels(graph); + var ieTypes = Graph.getIeTypes(graph); + fetch('../_static/html/modalLLM.html').then((response) => response.text()).then((text) => { + + // generate and configure modal container + var modal = $('
'); + modal.attr('id', 'graphModal'); + modal.addClass('modal'); + var modalContent = $(text); + modalContent.attr('id', 'graphModalContent'); + modalContent.addClass('modal-content'); + modal.append(modalContent); + + const models = networkModels.map((networkModel) => createCheckMark(networkModel, 'networkmodel')); + modal.find('.models-column').append(models); + + const selectAllModelsButton = createCheckMark('', 'networkmodel'); + modal.find('.models-selectall').append(selectAllModelsButton); + + const selectAllPlatformsButton = createCheckMark('', 'platform'); + modal.find('.platforms-selectall').append(selectAllPlatformsButton); + + const precisions = Modal.getPrecisionsLabels(graph).map((precision) => createCheckMark(precision, 'precision', false)); + modal.find('.precisions-column').append(precisions); + + selectAllCheckboxes(precisions); + disableAllCheckboxes(precisions); + + const selectAllTypesButton = createCheckMark('', 'ietype') + modal.find('.ietype-selectall').append(selectAllTypesButton); + + const iefilter = ieTypes.map((ieType) => createCheckMark(ieType, 'ietype')); + modal.find('.ietype-column').append(iefilter); + + modal.find('#modal-display-graphs').hide(); + modal.find('.ietype-column input').first().prop('checked', true); + + const kpiLabels = Filter.getParameters(graph).map((parameter) => createCheckMark(parameter, 'kpi', false)); + modal.find('.kpi-column').append(kpiLabels); + + $('body').prepend(modal); + + preselectDefaultSettings(graph, modal, appConfig); + + //is not generic solution :( + if (appConfig.DefaultSelections.platformTypes?.data?.includes('Select All')) { + selectAllCheckboxes(iefilter); + + }; + renderClientPlatforms(graph, modal); + + $('.clear-all-btn').on('click', clearAll); + $('#build-graphs-btn').on('click', () => { + $('#modal-configure-graphs').hide(); + clickBuildGraphsLLM(graph, appConfig, getSelectedNetworkModels(), getSelectedIeTypes(), getSelectedClientPlatforms(), getSelectedKpis(), Modal.getPrecisions(appConfig, getSelectedPrecisions())); + }); + $('.modal-close').on('click', hideModal); + $('.close-btn').on('click', hideModal); + + modal.find('.ietype-selectall input').on('click', function () { + if ($(this).prop('checked')) + selectAllCheckboxes(iefilter); + else deSelectAllCheckboxes(iefilter); + }); + + modal.find('.models-selectall input').on('click', function () { + if ($(this).prop('checked')) selectAllCheckboxes(models); + else deSelectAllCheckboxes(models); + + renderClientPlatforms(graph, modal) + }); + + modal.find('.platforms-selectall input').on('click', function () { + if ($(this).prop('checked')) + renderClientPlatforms(graph, modal) + else { + var enabledPlatforms = modal.find('.platforms-column .checkmark-container'); + deSelectCheckbox(enabledPlatforms); + }; + + }); + + modal.find('.models-column input').on('click', function () { + if (!$(this)[0].checked) { + deSelectCheckbox(selectAllModelsButton); + } + }); + + + modal.find('.ietype-column input').on('click', function () { + if (!$(this)[0].checked) { + deSelectCheckbox(selectAllTypesButton); + } + }); + + modal.find('.models-column input').on('click', () => renderClientPlatforms(graph, modal)); + modal.find('.ietype-column input').on('click', () => renderClientPlatforms(graph, modal)); + modal.find('.ietype-selectall input').on('click', () => renderClientPlatforms(graph, modal)); + modal.find('.platforms-column').on('click', () => enableParmeters(graph, getSelectedClientPlatforms())); + + modal.find('.kpi-column input').on('click', 
validatePrecisionSelection); + modal.find('input').on('click', validateSelections); + + var modalFilters = document.getElementById("modal-filters"); + + var showFiltersButton = document.getElementById("filters"); + showFiltersButton.onclick = function () { + modalFilters.style.display = "block"; + } + + var closeFiltersButton = document.getElementsByClassName("close-filters")[0]; + closeFiltersButton.onclick = function () { + modalFilters.style.display = "none"; + } + + window.onclick = function (event) { + if (event.target == modalFilters) { + modalFilters.style.display = "none"; + } + } + }); + } + + function validatePrecisionSelection() { + const precisions = $('.precisions-column').find('input') + precisions.prop('disabled', false); + } + + function clearAll() { + $('.modal-content-grid-container input:checkbox').each((index, object) => $(object).prop('checked', false)); + validatePrecisionSelection(); + validateSelections(); + } + + function preselectDefaultSettings(graph, modal, appConfig) { + + const defaultSelections = appConfig.DefaultSelections; + + selectDefaultPlatformType(defaultSelections.platformTypes, graph, modal); + + applyPlatformFilters(defaultSelections.platformFilters, modal, graph); + + clearAllSettings(defaultSelections); + + validateSelections(); + validatePrecisionSelection(); + } + + function selectDefaultPlatformType(platformTypes, graph, modal) { + if (!platformTypes) return; + + const type = platformTypes.data[0]; + $(`input[data-ietype="${type}"]`).prop('checked', true); + renderClientPlatforms(graph, modal); + } + + function applyPlatformFilters(platformFilters, modal, graph) { + if (!platformFilters) return; + + const filters = modal.find('.selectable-box-container').children('.selectable-box'); + filters.removeClass('selected'); + + platformFilters.data.forEach(selection => { + filters.filter(`[data-${platformFilters.name}="${selection}"]`).addClass('selected'); + }); + + renderClientPlatforms(graph, modal); + } + + function clearAllSettings(defaultSelections) { + clearAll(); + Object.keys(defaultSelections).forEach(setting => { + const { name, data } = defaultSelections[setting]; + data.forEach(selection => { + $(`input[data-${name}="${selection}"]`).prop('checked', true); + }); + }); + } + + function filterClientPlatforms(graph, ietypes) { + return Filter.ByIeTypes(graph, ietypes); + } + + function filterPlatforms(graph, ietypes, models) { + return Filter.ByTypesAndModels(graph, ietypes, models); + } + + function renderClientPlatforms(graph, modal) { + var fPlatforms = filterClientPlatforms(graph, getSelectedIeTypes()); + var platformNames = Graph.getPlatformNames(fPlatforms); + $('.platforms-column .checkmark-container').remove(); + + const clientPlatforms = platformNames.map((platform) => createCheckMark(platform, 'platform', true)); + + var enabledPlatforms = filterPlatforms(graph, getSelectedIeTypes(), getSelectedNetworkModels()); + enableCheckBoxes(clientPlatforms, enabledPlatforms); + modal.find('.platforms-column').append(clientPlatforms); + + enableParmeters(graph, getSelectedClientPlatforms()); + modal.find('.platforms-column input').on('click', validateSelections); + } + + function enableParmeters(graph, clientPlatforms) { + var allKpis = Filter.getParameters(graph); + + allKpis.forEach((kpi) => { + $(`input[data-kpi="${Graph.capitalizeFirstLetter(kpi)}"]`).prop('disabled', true); + }) + + var kpis = Filter.ByIeKpis(graph, clientPlatforms); + kpis.forEach((kpi) => { + $(`input[data-kpi="${Graph.capitalizeFirstLetter(kpi)}"]`).prop('disabled', 
false); + }) + } + + function createCheckMark(itemLabel, modelLabel, disabled) { + const item = $('