Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Added support for QONNX Resize node ingestion and tested with tiny UNet model #1122

Merged
merged 21 commits into from
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
a761da9
Added support for `Resize` node from QONNX model
nghielme Nov 12, 2024
b62468a
Added a test on tiny UNet model in order to test `Resize` node
nghielme Nov 12, 2024
40a431f
pre-commit restyling
nghielme Nov 12, 2024
be55945
Aesthetic fix
nghielme Nov 12, 2024
743831f
Second aesthetic fix
nghielme Nov 12, 2024
aa46dbe
Merge branch 'main' into resize_pr
nghielme Nov 12, 2024
4f82810
Added one test on a simpler model extracted from UNet model `branched…
nghielme Nov 12, 2024
7e6b9af
Example models commit updated
nghielme Nov 12, 2024
5757ac6
An empty list is now appended to the shape of all the inputs of the c…
nghielme Nov 12, 2024
5e13800
Merge branch 'main' into resize_pr
nghielme Nov 14, 2024
cf80f64
Cleaned some code and added the removal of RoI input from `Resize` node
nghielme Nov 15, 2024
c7f6983
Merge branch 'resize_pr' of https://github.com/fastmachinelearning/hl…
nghielme Nov 15, 2024
a5e32c5
Merge branch 'main' into resize_pr
nghielme Nov 15, 2024
b07e998
revert some unneeded changes
jmitrevs Nov 16, 2024
354b535
Added some minor checks related to sizes parameter
nghielme Nov 18, 2024
3b5f8db
Merge branch 'resize_pr' of https://github.com/fastmachinelearning/hl…
nghielme Nov 18, 2024
3254942
Merge branch 'main' into resize_pr
nghielme Nov 18, 2024
9943350
Minor fix
nghielme Nov 18, 2024
5ff517b
Minor modification of the error msg
nghielme Nov 19, 2024
6a10129
Minor fixes
nghielme Nov 20, 2024
20ab44f
Merge branch 'main' into resize_pr
nghielme Nov 21, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion example-models
2 changes: 1 addition & 1 deletion hls4ml/backends/fpga/passes/clone.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def transform(self, model, node):
)
for i in range(len(output_map[output])):
key = output + '_cpy' + str(i + 1)
clone_layer.attributes[key].type = node.attributes['result_t']
clone_layer.attributes[key].type = node.get_output_variable().type
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't this identical?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here is the output of the tests when you run the code with the original line:

    def match(self, node):
        if isinstance(node, (Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D)) and node.get_attr('pool_op') == 'Max':
>           return isinstance(node.get_input_variable().type.precision, XnorPrecisionType) and not isinstance(
                node.get_output_variable().type.precision, XnorPrecisionType
            )
E           AttributeError: 'FixedPrecisionType' object has no attribute 'precision'

hls4ml/backends/fpga/passes/xnor_pooling.py:14: AttributeError

Solved if you update it as proposed.

model.insert_node(clone_layer)
transformed = True

Expand Down
24 changes: 23 additions & 1 deletion hls4ml/converters/onnx/reshape.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from hls4ml.converters.onnx_to_hls import onnx_handler
from hls4ml.converters.onnx_to_hls import get_onnx_attribute, onnx_handler


@onnx_handler('Transpose')
Expand Down Expand Up @@ -36,3 +36,25 @@ def parse_flatten_layer(node, input_names, input_shapes, graph):
layer['target_shape'] = [-1] # does not contain batch dimension

return layer


@onnx_handler('Resize')
def parse_resize_layer(node, input_names, input_shapes, graph):
    """Parse an ONNX/QONNX ``Resize`` node into an hls4ml layer dictionary.

    Note: ``out_width``/``out_height`` are initialized to the *input* sizes here;
    they are rescaled later by the ``resize_constant`` optimizer pass once the
    constant ``scales`` input is available.
    """
    layer = {}
    layer['name'] = node.name
    layer['class_name'] = 'Resize'
    layer['inputs'] = input_names
    layer['outputs'] = list(node.output)
    # NOTE(review): input_shapes[0] is indexed [batch, 1, 2, 3]; the height/width
    # mapping below (height=idx 2, width=idx 1) follows the rest of this converter
    # -- confirm against the tensor layout actually produced by the frontend.
    layer['in_height'] = input_shapes[0][2]
    layer['in_width'] = input_shapes[0][1]
    layer['out_width'] = input_shapes[0][1]
    layer['out_height'] = input_shapes[0][2]
    layer['n_chan'] = input_shapes[0][3]
    layer['algorithm'] = get_onnx_attribute(node, 'mode')
    # The following is used in the initialize() method.
    # Probably a better solution would be to have a channels-last parameter at QONNX level.
    # The generator variable is named `n` (not `node`) so it does not shadow this
    # handler's `node` parameter.
    layer['data_format'] = (
        'channels_last' if any(n.domain == 'qonnx.custom_op.channels_last' for n in graph.node) else 'channels_first'
    )

    return layer
7 changes: 7 additions & 0 deletions hls4ml/converters/onnx_to_hls.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,13 @@ def get_input_shape(graph, node):
"""
rv = []
for inp in node.input:
# this couple of lines doesn't look very nice but I don't think it would be considered as wrong.
# It is necessary for `Resize` node, since RoI input is empty but necessary to specify also scales
# array. It might be better handled in QONNX, refers to this issue for more details:
# https://github.com/fastmachinelearning/qonnx/issues/150
if inp == '':
rv.append([])
continue
jmitrevs marked this conversation as resolved.
Show resolved Hide resolved
try:
value_info_idx = next((i for i, x in enumerate(graph.value_info) if x.name == inp))
dim = list(d.dim_value for d in graph.value_info[value_info_idx].type.tensor_type.shape.dim)
Expand Down
52 changes: 39 additions & 13 deletions hls4ml/model/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1147,20 +1147,46 @@ class Resize(Layer):
def initialize(self):
inp = self.get_input_variable()

if self.get_attr('data_format') == 'channels_last':
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
if len(self.inputs) > 1:
# get the scales of Resize node from QONNX frontend
scales = self.get_input_node(self.inputs[-1]).get_attr('value')
if self.get_attr('data_format') == 'channels_last':
if len(inp.shape) == 2: # 1D -> width + chan
shape = [int(self.get_attr('out_width') * scales[1]), int(self.get_attr('n_chan') * scales[2])]
dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [
int(self.get_attr('out_height') * scales[1]),
int(self.get_attr('out_width') * scales[2]),
int(self.get_attr('n_chan') * scales[3]),
]
dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
else:
if len(inp.shape) == 2: # 1D -> width + chan
shape = [int(self.get_attr('n_chan') * scales[1]), int(self.get_attr('out_width') * scales[2])]
dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [
int(self.get_attr('n_chan') * scales[1]),
int(self.get_attr('out_height') * scales[2]),
int(self.get_attr('out_width') * scales[3]),
]
dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}']
else:
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_height'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}']
if self.get_attr('data_format') == 'channels_last':
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')]
dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
else:
if len(inp.shape) == 2: # 1D -> width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}']
elif len(inp.shape) == 3: # 2D -> height + width + chan
shape = [self.get_attr('n_chan'), self.get_attr('out_height'), self.get_attr('out_width')]
dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}']

self.add_output_variable(shape, dims, precision=inp.type.precision)

Expand Down
1 change: 1 addition & 0 deletions hls4ml/model/optimizer/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
'parse_qonnx',
[
'reshape_constant',
'resize_constant',
'quant_constant_parameters',
'quant_to_activation',
'fuse_quant_with_constant',
Expand Down
27 changes: 27 additions & 0 deletions hls4ml/model/optimizer/passes/resize_const.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
from hls4ml.model.layers import Constant, Resize
from hls4ml.model.optimizer import OptimizerPass


class ResizeConstant(OptimizerPass):
    """
    To compute the output shape of a `Resize` node it is necessary to access the
    scales, which are stored as an ONNX initializer and later converted into a
    constant input node by the frontend.
    """

    def match(self, node):
        # Only Resize nodes whose last input is fed by another node (the scales input).
        is_match = isinstance(node, Resize) and len(node.inputs) > 1 and node.get_input_node(node.inputs[-1])
        return is_match

    def transform(self, model, node):
        """
        Fold the constant scales input into the Resize node's output shape and remove
        the Constant node. Note, the input shape node is already used in initialize().
        """
        scales_node = node.get_input_node(node.inputs[-1])
        # Validate BEFORE touching the graph: the previous code read 'value' and
        # detached the input first, so a non-constant scales input failed with a
        # confusing AttributeError on an already-mutated node instead of this error.
        if not isinstance(scales_node, Constant):
            raise RuntimeError("Non-constant shape inputs are not supported")
        scales_values = scales_node.get_attr('value')
        # NOTE(review): scales_values[1]/[2] are applied to width/height respectively,
        # mirroring the in_width/in_height index mapping in the ONNX parser -- confirm
        # this matches the tensor layout (H/W order) of the incoming model.
        node.set_attr('out_width', int(node.get_attr('in_width') * scales_values[1]))
        node.set_attr('out_height', int(node.get_attr('in_height') * scales_values[2]))
        # Detach and drop the now-folded Constant node; no rewiring is needed.
        node.inputs[-1] = ''
        model.remove_node(scales_node, rewire=False)
        return True
100 changes: 100 additions & 0 deletions test/pytest/test_qonnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

# To conveniently run QONNX inference
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.base import Transformation
from qonnx.transformation.channels_last import ConvertToChannelsLastAndClean
from qonnx.transformation.gemm_to_matmul import GemmToMatMul

Expand Down Expand Up @@ -101,6 +102,32 @@ def sep_conv_model():
return model


@pytest.fixture(scope='module')
def branched_model():
    """
    Load branched model using separable convs, already channels-last and cleaned
    """
    # Resolve the example-model file and fail loudly if it is missing.
    model_path = str(example_model_path / "onnx/branched_model_ch_last.onnx")
    assert os.path.isfile(model_path)
    return ModelWrapper(model_path)


@pytest.fixture(scope='module')
def tiny_unet_model():
    """
    Load tiny unet model, already channels-last and cleaned
    """
    # Resolve the example-model file and fail loudly if it is missing.
    model_path = str(example_model_path / "onnx/tiny_unet_ch_last.onnx")
    assert os.path.isfile(model_path)
    return ModelWrapper(model_path)


@pytest.fixture(scope='module')
def two_layer_keras_model():
"""
Expand Down Expand Up @@ -309,6 +336,79 @@ def test_sep_conv(sep_conv_model, backend):
np.testing.assert_allclose(y_qonnx.ravel(), y_hls4ml.ravel(), atol=1e-2, rtol=1)


class EmptyFilledRoI(Transformation):
    """Remove the RoI initializer of Resize nodes (added only for shape inference)."""

    def apply(self, model):
        graph_modified = False
        for node in model.graph.node:
            if node.op_type == 'Resize':
                # ONNX Resize inputs: X, roi (optional), scales, sizes -- 'roi' is input[1]
                if len(node.input) > 2 and node.input[1] != '':
                    # Guarded lookup: the previous code used list.index(), which raised
                    # an unhandled ValueError whenever the RoI tensor was not a graph
                    # initializer (e.g. produced by another node). Skip such nodes.
                    init_to_remove = next(
                        (init for init in model.graph.initializer if init.name == node.input[1]), None
                    )
                    if init_to_remove is None:
                        continue
                    model.graph.initializer.remove(init_to_remove)
                    node.input[1] = ''
                    graph_modified = True
        return (model, graph_modified)


@pytest.mark.parametrize('backend', ['Vitis'])
def test_branched_model(branched_model, backend):
    """End-to-end check: the hls4ml output must match QONNX execution exactly."""
    model = branched_model
    input_name = model.graph.input[0].name
    output_name = model.graph.output[0].name

    # Random input quantized to 16 fractional bits so both runtimes see identical values.
    in_shape = tuple(model.get_tensor_shape(input_name))
    X = np.random.uniform(low=0, high=1, size=np.prod(in_shape)).reshape(in_shape)
    X = (np.round(X * 2**16) * 2**-16).astype(np.float32)

    # Reference result from the QONNX execution engine.
    y_qonnx = oxe.execute_onnx(model, {input_name: X})[output_name]

    config = hls4ml.utils.config.config_from_onnx_model(
        model, granularity='name', backend=backend, default_precision='fixed<32,16>'
    )

    # Drop the RoI initializer before conversion, then build and run the HLS model.
    model = model.transform(EmptyFilledRoI())
    hls_model = hls4ml.converters.convert_from_onnx_model(
        model,
        output_dir=str(test_root_path / f'hls4mlprj_qonnx_branched_model_{backend}'),
        io_type='io_stream',
        backend=backend,
        hls_config=config,
    )
    hls_model.compile()
    y_hls4ml = hls_model.predict(np.ascontiguousarray(X))

    np.testing.assert_array_equal(y_qonnx.ravel(), y_hls4ml.ravel())


@pytest.mark.parametrize('backend', ['Vitis'])
def test_tiny_unet_model(tiny_unet_model, backend):
    """End-to-end check: the hls4ml output must match QONNX execution exactly."""
    model = tiny_unet_model
    input_name = model.graph.input[0].name
    output_name = model.graph.output[0].name

    # Random input quantized to 16 fractional bits so both runtimes see identical values.
    in_shape = tuple(model.get_tensor_shape(input_name))
    X = np.random.uniform(low=0, high=1, size=np.prod(in_shape)).reshape(in_shape)
    X = (np.round(X * 2**16) * 2**-16).astype(np.float32)

    # Reference result from the QONNX execution engine.
    y_qonnx = oxe.execute_onnx(model, {input_name: X})[output_name]

    config = hls4ml.utils.config.config_from_onnx_model(
        model, granularity='name', backend=backend, default_precision='fixed<32,16>'
    )

    # Drop the RoI initializer before conversion, then build and run the HLS model.
    model = model.transform(EmptyFilledRoI())
    hls_model = hls4ml.converters.convert_from_onnx_model(
        model,
        output_dir=str(test_root_path / f'hls4mlprj_qonnx_tiny_unet_model_{backend}'),
        io_type='io_stream',
        backend=backend,
        hls_config=config,
    )
    hls_model.compile()
    y_hls4ml = hls_model.predict(np.ascontiguousarray(X))

    np.testing.assert_array_equal(y_qonnx.ravel(), y_hls4ml.ravel())


@pytest.mark.parametrize(
'model_name',
[
Expand Down
Loading