Discussed in #2547
Originally posted by MinGiSa March 5, 2024
I've been working on converting Torch models into OpenVINO models recently.
I was able to do this easily for the existing classification models in torchvision by following the tutorials in openvino_notebooks.
However, when I try to convert a detection model, I get an error at the call quantized_model = nncf.quantize(torchModel, calibrationDataset).
I'm not sure how to solve this. Could you help me?
Below is my code; the model is torchvision's fasterrcnn_resnet50_fpn_v2.
import sys
import os

os.environ["PYTHONUTF8"] = str(1)

from pathlib import Path
from typing import List, Tuple

import nncf
import openvino as ov
import torch
import torchvision.transforms as transforms
import distutils.command.build_ext
import numpy as np
import openvino.runtime as ovr
import time

from DTT_dataLoader import customDatasetDTT
from DTT_utils import createModel, validateDTT, getModelSize

if sys.platform == "win32":
    VS_INSTALL_DIR = r"C:/Program Files (x86)/Microsoft Visual Studio"
    cl_paths = sorted(list(Path(VS_INSTALL_DIR).glob("**/Hostx86/x64/cl.exe")))
    if len(cl_paths) == 0:
        raise ValueError(
            "Cannot find Visual Studio. This notebook requires C++. If you installed "
            "a C++ compiler, please add the directory that contains cl.exe to "
            "os.environ['PATH']"
        )
    else:
        cl_path = cl_paths[-1]
        vs_dir = str(cl_path.parent)
        os.environ["PATH"] += f"{os.pathsep}{vs_dir}"
        d = distutils.core.Distribution()
        b = distutils.command.build_ext.build_ext(d)
        b.finalize_options()
        os.environ["LIB"] = os.pathsep.join(b.library_dirs)
def transformDTT_fn(dataItem):
    # Transform for nncf.Dataset: keep only the images and drop the targets
    # during calibration.
    images, _ = dataItem
    return images
torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using {torch_device} device")
DATASET_FOLDER = -
OUTPUT_FOLDER = -
TASK_TYPE = -
BASE_MODEL = -
XML_PATH = -
PRETRAINED_MODEL_PATH = -
IMAGE_SIZE = [1024, 1024]
NUM_CLASSES = 4
BATCH_SIZE = 1
VALIDATE_TRIGGER = False
Path(OUTPUT_FOLDER).mkdir(exist_ok=True)
openVinoFp32Path = Path(OUTPUT_FOLDER) / Path(BASE_MODEL + "DTT_fp32").with_suffix(".xml")
openVinoInt8Path = Path(OUTPUT_FOLDER) / Path(BASE_MODEL + "DTT_int8").with_suffix(".xml")
torchModel = createModel(TASK_TYPE, BASE_MODEL, PRETRAINED_MODEL_PATH, NUM_CLASSES, IMAGE_SIZE[0], IMAGE_SIZE[1])
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
trainTransform = transforms.Compose([transforms.Resize(IMAGE_SIZE), transforms.ToTensor(), normalize])
validTransform = transforms.Compose([transforms.Resize(IMAGE_SIZE), transforms.ToTensor(), normalize])
testTransform = transforms.Compose([transforms.Resize(IMAGE_SIZE), transforms.ToTensor(), normalize])
trainDataset = customDatasetDTT(xmlFile=XML_PATH, mode='TRAINING', imageFolder=DATASET_FOLDER, transform=trainTransform)
validDataset = customDatasetDTT(xmlFile=XML_PATH, mode='VALIDATION', imageFolder=DATASET_FOLDER, transform=validTransform)
testDataset = customDatasetDTT(xmlFile=XML_PATH, mode='TEST', imageFolder=DATASET_FOLDER, transform=testTransform)
trainLoader = torch.utils.data.DataLoader(
    trainDataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    num_workers=0,
    pin_memory=True,
    sampler=None,
)
valLoader = torch.utils.data.DataLoader(
    validDataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    num_workers=0,
    pin_memory=True,
)
testLoader = torch.utils.data.DataLoader(
    testDataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    num_workers=0,
    pin_memory=True,
)
calibrationDataset = nncf.Dataset(trainLoader, transformDTT_fn)
quantized_model = nncf.quantize(torchModel, calibrationDataset)
################################################
And here is the error output:
RuntimeError Traceback (most recent call last)
Cell In[33], line 1
----> 1 quantized_model = nncf.quantize(torchModel, calibrationDataset)
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\quantization\quantize_model.py:146, in quantize(model, calibration_dataset, mode, preset, target_device, subset_size, fast_bias_correction, model_type, ignored_scope, advanced_parameters)
143 if backend == BackendType.TORCH:
144 from nncf.torch.quantization.quantize_model import quantize_impl
--> 146 return quantize_impl(
147 model=model,
148 calibration_dataset=calibration_dataset,
149 mode=mode,
150 preset=preset,
151 target_device=target_device,
152 subset_size=subset_size,
153 fast_bias_correction=fast_bias_correction,
154 model_type=model_type,
155 ignored_scope=ignored_scope,
156 advanced_parameters=advanced_parameters,
157 )
159 raise RuntimeError(f"Unsupported type of backend: {backend}")
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\torch\quantization\quantize_model.py:67, in quantize_impl(model, calibration_dataset, mode, preset, target_device, subset_size, fast_bias_correction, model_type, ignored_scope, advanced_parameters)
55 nncf_network = wrap_model(copied_model.eval(), example_input)
57 quantization_algorithm = PostTrainingQuantization(
58 preset=preset,
59 target_device=target_device,
(...)
64 advanced_parameters=advanced_parameters,
65 )
---> 67 quantized_model = quantization_algorithm.apply(
68 nncf_network, nncf_network.nncf.get_graph(), dataset=calibration_dataset
69 )
71 quantized_model.nncf.disable_dynamic_graph_building()
73 return quantized_model
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\quantization\algorithms\post_training\algorithm.py:112, in PostTrainingQuantization.apply(self, model, graph, statistic_points, dataset)
109 if statistic_points:
110 step_index_to_statistics = {0: statistic_points}
--> 112 return self._pipeline.run_from_step(model, dataset, graph, 0, step_index_to_statistics)
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\quantization\algorithms\pipeline.py:164, in Pipeline.run_from_step(self, model, dataset, graph, start_step_index, step_index_to_statistics)
161 step_statistics = collect_statistics(statistic_points, step_model, step_graph, dataset)
163 # Run current pipeline step
--> 164 step_model = self.run_step(step_index, step_statistics, step_model, step_graph)
166 step_graph = None # We should rebuild the graph for the next pipeline step
168 return step_model
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\quantization\algorithms\pipeline.py:117, in Pipeline.run_step(self, step_index, step_statistics, model, graph)
115 pipeline_step = pipeline_steps[step_index]
116 for algorithm in pipeline_step[:-1]:
--> 117 current_model = algorithm.apply(current_model, current_graph, step_statistics)
118 current_graph = NNCFGraphFactory.create(current_model)
119 current_model = pipeline_step[-1].apply(current_model, current_graph, step_statistics)
File c:\Users\sa\anaconda3\envs\openVino\lib\site-packages\nncf\quantization\algorithms\min_max\algorithm.py:834, in MinMaxQuantization.apply(self, model, graph, statistic_points, dataset)
832 statistics = tensor_collector.get_statistics()
833 if statistics.min_values is None or statistics.max_values is None:
--> 834 raise RuntimeError(f"Statistics were not collected for the node {target_node_name}")
835 if self._mode is not None:
836 destination_type = self._quantization_params[quant_group].destination_type
RuntimeError: Statistics were not collected for the node FasterRCNN/RoIHeads[roi_heads]/__sub___4
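Editor's note, not part of the original report: below is a minimal, hedged sketch of the flow the openvino_notebooks detection examples generally use, i.e. converting the PyTorch model to an ov.Model first and running nncf.quantize through the OpenVINO backend rather than the Torch backend. It reuses torchModel, trainLoader, IMAGE_SIZE and openVinoInt8Path from the code above; the example_input shape, the transformDTT_ov_fn helper and the numpy conversion are assumptions, not part of the poster's code.

# Hedged sketch: quantize via the OpenVINO backend instead of the Torch backend.
torchModel.eval()

# Assumption: the model traces with a single batched example of the same size as IMAGE_SIZE.
exampleInput = torch.rand(1, 3, 1024, 1024)
ovModel = ov.convert_model(torchModel, example_input=exampleInput)

def transformDTT_ov_fn(dataItem):
    # Same idea as transformDTT_fn, but feed the OpenVINO backend a numpy array
    # (assumption: images is a CPU tensor of shape [1, 3, H, W]).
    images, _ = dataItem
    return images.numpy()

ovCalibrationDataset = nncf.Dataset(trainLoader, transformDTT_ov_fn)
quantizedOvModel = nncf.quantize(ovModel, ovCalibrationDataset)
ov.save_model(quantizedOvModel, openVinoInt8Path)

With this route, statistics are collected on the converted OpenVINO graph rather than on the traced PyTorch graph, which may sidestep the RoIHeads subgraph the Torch backend could not collect statistics for; whether fasterrcnn_resnet50_fpn_v2 converts cleanly with a plain example_input like this still needs to be verified.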
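Alternatively, staying on the Torch backend, one hedged option is to exclude the failing RoIHeads subgraph from quantization via nncf.IgnoredScope. The regex below is an assumption derived from the node name in the traceback, and excluding the box head means that part of the model stays at floating point.

# Hedged sketch: skip quantizer placement inside RoIHeads so statistics are
# no longer required for those nodes (regex is a guess from the error message).
quantized_model = nncf.quantize(
    torchModel,
    calibrationDataset,
    ignored_scope=nncf.IgnoredScope(patterns=[".*RoIHeads.*"]),
)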