Commit 300953f
Fix CPP passes
Signed-off-by: Vihang Mehta <[email protected]>
vihangm committed Nov 19, 2024
1 parent 643f23d commit 300953f
Showing 2 changed files with 4 additions and 4 deletions.
@@ -665,7 +665,7 @@ void Transformations::PreLpt(const std::vector<ov::element::Type>& defaultPrecis
 
     CPU_SET_CALLBACK_COMMON(manager,
         [&defaultPrecisions](const_node_ptr &node) -> bool {
-            return ov::pass::low_precision::NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(node, defaultPrecisions);
+            return !ov::pass::low_precision::NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(node, defaultPrecisions);
         },
         ov::pass::ConvertQuantizeDequantize);
 }
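
Note on the flipped predicate above: OpenVINO plugins attach per-pass callbacks through ov::pass::PassConfig (CPU_SET_CALLBACK_COMMON is presumably a wrapper around that), and a pass consults the result via transformation_callback(node). Whether a true result means "handle this node" or "skip this node" is defined by each pass, so negating the predicate inverts which matched Multiply nodes ConvertQuantizeDequantize treats specially. The sketch below shows only the general registration pattern with the public API; the ConvertFP32ToFP16 pass type and the name-based rule are placeholder assumptions, not code from this commit.

    // Minimal sketch, assuming OpenVINO's public Manager/PassConfig API.
    // The pass type and the predicate are illustrative placeholders.
    #include <memory>

    #include "openvino/core/node.hpp"
    #include "openvino/pass/convert_fp32_to_fp16.hpp"
    #include "openvino/pass/manager.hpp"
    #include "openvino/pass/pass_config.hpp"

    void configure_callbacks(ov::pass::Manager& manager) {
        auto pass_config = manager.get_pass_config();
        pass_config->set_callback<ov::pass::ConvertFP32ToFP16>(
            [](const std::shared_ptr<const ov::Node>& node) -> bool {
                // Flipping this return value inverts the set of nodes the
                // pass treats specially, which is the effect of the change above.
                return node->get_friendly_name().rfind("keep_", 0) == 0;
            });
    }
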
src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp (3 additions & 3 deletions)
@@ -310,6 +310,8 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
     if (!is_model_quantized)
         pass_config->set_callback<ov::pass::MarkDequantizationSubgraph>(is_non_supported_decompression_op);
 
+    manager.register_pass<ov::pass::CommonOptimizations>();
+
     const bool keep_precision_sensitive_in_fp32_1 = true;
     const bool convert_input_output_precision = false;
     const bool store_original_precision_as_rt_attribute = true;
@@ -319,8 +321,6 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
                                              convert_input_output_precision,
                                              store_original_precision_as_rt_attribute);
 
-    manager.register_pass<ov::pass::CommonOptimizations>();
-
     pass_config->set_callback<ov::pass::ScaledDotProductAttentionDecomposition>([&](const std::shared_ptr<const ov::Node> node){
         GPU_DEBUG_IF(cldnn::debug_configuration::get_instance()->enable_sdpa != -1) {
             GPU_DEBUG_CODE(return cldnn::debug_configuration::get_instance()->enable_sdpa == 1);
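
The relocation of the CommonOptimizations registration above matters because an ov::pass::Manager executes passes in the order they were registered, so the pass now transforms the model before the registrations that follow it rather than after. A minimal sketch of that ordering behavior, using placeholder passes from the public API (not code from this file):

    // Minimal sketch, assuming OpenVINO's public API: passes run in
    // registration order when run_passes() is called, so reordering
    // register_pass<>() calls changes which pass transforms the model first.
    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/pass/constant_folding.hpp"
    #include "openvino/pass/manager.hpp"
    #include "openvino/pass/validate.hpp"

    void run_pipeline(const std::shared_ptr<ov::Model>& model) {
        ov::pass::Manager manager;
        manager.register_pass<ov::pass::ConstantFolding>();  // executes first
        manager.register_pass<ov::pass::Validate>();         // executes second
        manager.run_passes(model);
    }
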
@@ -638,7 +638,7 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
 
     if (enableInt8) {
         pass_config->set_callback<ov::pass::ConvertQuantizeDequantize>([&](const_node_ptr &node) -> bool {
-            return ov::pass::low_precision::NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(node, defaultPrecisions);
+            return !ov::pass::low_precision::NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(node, defaultPrecisions);
         });
     }
 
