Skip to content

Commit

Permalink
Merge branch 'develop' into bcast_transpose_scalar
Browse files Browse the repository at this point in the history
  • Loading branch information
causten authored Sep 21, 2023
2 parents b38f20b + 4637621 commit 2d13a8d
Show file tree
Hide file tree
Showing 14 changed files with 212 additions and 14 deletions.
2 changes: 1 addition & 1 deletion src/include/migraphx/op/roialign.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ struct roialign
{
xy[ii] = roi_start[ii] + p[ii] * bin_size[ii] +
(i[ii] + .5f) * bin_size[ii] / bin_grid_size[ii];
xy[ii] = (coord_trans_mode == "output_half_pixel") ? (xy[ii] - 0.5f) : xy[ii];
xy[ii] = (coord_trans_mode == "half_pixel") ? (xy[ii] - 0.5f) : xy[ii];
if(xy[ii] < -1.0 or xy[ii] > dims[ii])
{
results[index] = pos_weight{};
Expand Down
29 changes: 26 additions & 3 deletions src/onnx/parse_constant.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include <migraphx/ranges.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/stringutils.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
Expand All @@ -39,16 +40,38 @@ struct parse_constant : op_parser<parse_constant>
onnx_parser::node_info info,
const std::vector<instruction_ref>& /*args*/) const
{
literal v = parser.parse_value(info.attributes.at("value"));
static const std::vector<std::string> attributes = {
"value", "value_float", "value_floats", "value_int", "value_ints"};

std::vector<std::string> present_attributes;
std::copy_if(attributes.begin(),
attributes.end(),
std::back_inserter(present_attributes),
[&](const std::string& a) { return contains(info.attributes, a); });

if(present_attributes.empty())
{
MIGRAPHX_THROW("Constant node does not contain any supported attribute");
}

if(present_attributes.size() > 1)
{
MIGRAPHX_THROW("Constant contains multiple attributes: " +
join_strings(std::move(present_attributes), ", "));
}

// cppcheck-suppress accessMoved
auto&& attr = info.attributes[present_attributes[0]];
literal v = parser.parse_value(attr);

// return empty literal
if(v.get_shape().elements() == 0)
{
return info.add_literal(literal{v.get_shape().type()});
}

auto dim_size = info.attributes.at("value").t().dims_size();
// if dim_size is 0, it is a scalar
if(dim_size == 0)
if(attr.has_t() and attr.t().dims_size() == 0)
{
migraphx::shape scalar_shape{v.get_shape().type()};
return info.add_literal(migraphx::literal{scalar_shape, v.data()});
Expand Down
11 changes: 7 additions & 4 deletions src/onnx/parse_roialign.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,15 +37,18 @@ struct parse_roialign : op_parser<parse_roialign>
std::vector<op_desc> operators() const { return {{"RoiAlign"}}; }

instruction_ref parse(const op_desc& /*opd*/,
const onnx_parser& /*parser*/,
const onnx_parser& parser,
onnx_parser::node_info info,
const std::vector<instruction_ref>& args) const
{
std::string coord_trans_mode = "half_pixel";
if(contains(info.attributes, "coordinate_transformation_mode"))
std::string coord_trans_mode =
parser.opset_version >= 16 ? "half_pixel" : "output_half_pixel";

if(const auto* a = "coordinate_transformation_mode"; contains(info.attributes, a))
{
coord_trans_mode = info.attributes.at("coordinate_transformation_mode").s();
coord_trans_mode = info.attributes.at(a).s();
}

if(not contains({"half_pixel", "output_half_pixel"}, coord_trans_mode))
{
MIGRAPHX_THROW("coordinate_transformation_mode \"" + coord_trans_mode +
Expand Down
2 changes: 1 addition & 1 deletion src/targets/gpu/jit/roialign.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ struct roialign_compiler : compiler<roialign_compiler>

// coord_trans_mode
auto ctm = v.at("coordinate_transformation_mode").to<std::string>();
float rois_offset = (ctm == "output_half_pixel") ? -0.5f : 0.0f;
float rois_offset = (ctm == "half_pixel") ? -0.5f : 0.0f;
options.params += " -DROIS_OFFSET=" + std::to_string(rois_offset);

// spatial_scale
Expand Down
Binary file added test/onnx/constant_multiple_attributes_test.onnx
Binary file not shown.
3 changes: 3 additions & 0 deletions test/onnx/constant_no_attributes_test.onnx
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
constant_no_attributes_test:)

"Constantconstant_no_attributes_testB
Binary file added test/onnx/constant_value_float_test.onnx
Binary file not shown.
Binary file added test/onnx/constant_value_floats_test.onnx
Binary file not shown.
3 changes: 3 additions & 0 deletions test/onnx/constant_value_int_test.onnx
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
constant_value_int_test:7
"Constant*
value_int@�constant_value_int_testB
4 changes: 4 additions & 0 deletions test/onnx/constant_value_ints_test.onnx
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
constant_value_ints_test:=
!"Constant*

value_ints@@@�constant_value_ints_testB
70 changes: 70 additions & 0 deletions test/onnx/gen_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -825,6 +825,76 @@ def constant_test():
return ([node], [], [y])


@onnx_test()
def constant_value_float_test():
    # Constant node carrying only the `value_float` attribute.
    # NOTE(review): value_float is a scalar FLOAT attribute in the ONNX spec;
    # passing a one-element list makes onnx.helper emit a FLOATS attribute
    # instead — confirm this is intentional.
    const_node = onnx.helper.make_node('Constant',
                                       inputs=[],
                                       outputs=[],
                                       value_float=[1.0])
    return ([const_node], [], [])


@onnx_test()
def constant_value_floats_test():
    # Constant node carrying only the `value_floats` attribute.
    const_node = onnx.helper.make_node('Constant',
                                       inputs=[],
                                       outputs=[],
                                       value_floats=[1.0, 2.0, 3.0])
    return ([const_node], [], [])


@onnx_test()
def constant_value_int_test():
    # Constant node carrying only the `value_int` attribute.
    # NOTE(review): value_int is a scalar INT attribute in the ONNX spec;
    # passing a one-element list makes onnx.helper emit an INTS attribute
    # instead — confirm this is intentional.
    const_node = onnx.helper.make_node('Constant',
                                       inputs=[],
                                       outputs=[],
                                       value_int=[1])
    return ([const_node], [], [])


@onnx_test()
def constant_value_ints_test():
    # Constant node carrying only the `value_ints` attribute.
    const_node = onnx.helper.make_node('Constant',
                                       inputs=[],
                                       outputs=[],
                                       value_ints=[1, 2, 3])
    return ([const_node], [], [])


@onnx_test()
def constant_no_attributes_test():
    # A Constant node without any value* attribute; the parser is expected
    # to reject it.
    bare_node = onnx.helper.make_node('Constant', inputs=[], outputs=[])
    return ([bare_node], [], [])


@onnx_test()
def constant_multiple_attributes_test():
    # A Constant node may carry only one value* attribute; this one sets
    # three at once, so the parser is expected to reject it.
    data = np.array([0, 1, 2])
    tensor = onnx.helper.make_tensor(name='const_tensor',
                                     data_type=TensorProto.FLOAT,
                                     dims=data.shape,
                                     vals=data.flatten().astype(float))
    node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=[],
                                 value_floats=[1.0, 2.0],
                                 value_ints=[1, 2],
                                 value=tensor)
    return ([node], [], [])


@onnx_test()
def constant_fill_test():
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
Expand Down
60 changes: 59 additions & 1 deletion test/onnx/onnx_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -930,6 +930,58 @@ TEST_CASE(constant_test)
EXPECT(p == prog);
}

TEST_CASE(constant_value_float_test)
{
    // A Constant carrying a value_float attribute parses to a single-element
    // float literal.
    migraphx::program expected;
    auto* mm = expected.get_main_module();
    const migraphx::shape lit_shape{migraphx::shape::float_type, {1}};
    mm->add_literal(migraphx::literal{lit_shape, {1.0f}});
    auto parsed = optimize_onnx("constant_value_float_test.onnx");

    EXPECT(expected == parsed);
}

TEST_CASE(constant_value_floats_test)
{
    // A Constant carrying a value_floats attribute parses to a rank-1 float
    // literal holding all three values.
    migraphx::program expected;
    auto* mm = expected.get_main_module();
    const migraphx::shape lit_shape{migraphx::shape::float_type, {3}};
    mm->add_literal(migraphx::literal{lit_shape, {1.0f, 2.0f, 3.0f}});
    auto parsed = optimize_onnx("constant_value_floats_test.onnx");

    EXPECT(expected == parsed);
}

TEST_CASE(constant_value_int_test)
{
    // A Constant carrying a value_int attribute parses to a single-element
    // int64 literal.
    migraphx::program expected;
    auto* mm = expected.get_main_module();
    const migraphx::shape lit_shape{migraphx::shape::int64_type, {1}};
    mm->add_literal(migraphx::literal{lit_shape, {1}});
    auto parsed = optimize_onnx("constant_value_int_test.onnx");

    EXPECT(expected == parsed);
}

TEST_CASE(constant_value_ints_test)
{
    // A Constant carrying a value_ints attribute parses to a rank-1 int64
    // literal holding all three values.
    migraphx::program expected;
    auto* mm = expected.get_main_module();
    const migraphx::shape lit_shape{migraphx::shape::int64_type, {3}};
    mm->add_literal(migraphx::literal{lit_shape, {1, 2, 3}});
    auto parsed = optimize_onnx("constant_value_ints_test.onnx");

    EXPECT(expected == parsed);
}

TEST_CASE(constant_no_attributes_test)
{
    // Parsing must fail: the Constant node carries none of the supported
    // value* attributes. The lambda captures nothing, so an empty capture
    // list suffices.
    EXPECT(test::throws([] { optimize_onnx("constant_no_attributes_test.onnx"); }));
}

TEST_CASE(constant_multiple_attributes_test)
{
    // Parsing must fail: the Constant node sets several value* attributes at
    // once. The lambda captures nothing, so an empty capture list suffices.
    EXPECT(test::throws([] { optimize_onnx("constant_multiple_attributes_test.onnx"); }));
}

TEST_CASE(constant_fill_test)
{
migraphx::program p;
Expand Down Expand Up @@ -5899,7 +5951,13 @@ TEST_CASE(roialign_default_test)
auto rois = mm->add_parameter("rois", srois);
auto bi = mm->add_parameter("batch_ind", sbi);

auto r = mm->add_instruction(migraphx::make_op("roialign"), x, rois, bi);
// Due to the onnx model using opset 12, the coordinate_transformation_mode should be set to
// output_half_pixel
auto r = mm->add_instruction(
migraphx::make_op("roialign", {{"coordinate_transformation_mode", "output_half_pixel"}}),
x,
rois,
bi);
mm->add_return({r});

auto prog = migraphx::parse_onnx("roialign_default_test.onnx");
Expand Down
34 changes: 34 additions & 0 deletions test/py/onnx_backend_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,34 @@ def disabled_tests_onnx_1_12_0(backend_test):
backend_test.exclude(r'test_scatter_elements_with_duplicate_indices_cpu')


def disabled_tests_onnx_1_13_0(backend_test):
    # Exclude onnx 1.13.0 backend tests that fail because the CastLike
    # operator is unsupported. Patterns are kept in a tuple so the list is
    # easy to extend and the exclusion order stays explicit.
    castlike_dependent = (
        r'test_elu_default_expanded_ver18_cpu',
        r'test_elu_example_expanded_ver18_cpu',
        r'test_elu_expanded_ver18_cpu',
        r'test_hardsigmoid_default_expanded_ver18_cpu',
        r'test_hardsigmoid_example_expanded_ver18_cpu',
        r'test_hardsigmoid_expanded_ver18_cpu',
        r'test_leakyrelu_default_expanded_cpu',
        r'test_leakyrelu_example_expanded_cpu',
        r'test_leakyrelu_expanded_cpu',
        r'test_selu_default_expanded_ver18_cpu',
        r'test_selu_example_expanded_ver18_cpu',
        r'test_selu_expanded_ver18_cpu',
        r'test_thresholdedrelu_default_expanded_ver18_cpu',
        r'test_thresholdedrelu_example_expanded_ver18_cpu',
        r'test_thresholdedrelu_expanded_ver18_cpu',
        r'test_relu_expanded_ver18_cpu',
        r'test_softsign_example_expanded_ver18_cpu',
        r'test_softsign_expanded_ver18_cpu',
    )
    for pattern in castlike_dependent:
        backend_test.exclude(pattern)


def disabled_tests_onnx_1_14_0(backend_test):
    # Exclude onnx 1.14.0 backend tests that fail because the CastLike
    # operator is unsupported.
    for pattern in (
            r'test_softplus_example_expanded_ver18_cpu',
            r'test_softplus_expanded_ver18_cpu',
    ):
        backend_test.exclude(pattern)


def create_backend_test(testname=None, target_device=None):
if target_device is not None:
c2.set_device(target_device)
Expand Down Expand Up @@ -334,6 +362,12 @@ def create_backend_test(testname=None, target_device=None):
if version.parse(onnx.__version__) >= version.parse("1.12.0"):
disabled_tests_onnx_1_12_0(backend_test)

if version.parse(onnx.__version__) >= version.parse("1.13.0"):
disabled_tests_onnx_1_13_0(backend_test)

if version.parse(onnx.__version__) >= version.parse("1.14.0"):
disabled_tests_onnx_1_14_0(backend_test)


# import all test cases at global scope to make
# them visible to python.unittest.
Expand Down
8 changes: 4 additions & 4 deletions test/ref/roialign.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ TEST_CASE(roialign_out_of_bound_test)
};

{
auto p = create_program("output_half_pixel");
auto p = create_program("half_pixel");
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
Expand Down Expand Up @@ -130,7 +130,7 @@ TEST_CASE(roialign_test)
};

{
auto p = create_program();
auto p = create_program("output_half_pixel");
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
Expand All @@ -154,7 +154,7 @@ TEST_CASE(roialign_test)
}

{
auto p = create_program("output_half_pixel");
auto p = create_program("half_pixel");
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
Expand All @@ -175,7 +175,7 @@ TEST_CASE(roialign_test)
}

{
auto p = create_program("output_half_pixel", migraphx::op::pooling_mode::max, 0);
auto p = create_program("half_pixel", migraphx::op::pooling_mode::max, 0);
p.compile(migraphx::make_target("ref"));
auto result = p.eval({}).back();
std::vector<float> results_vector;
Expand Down

0 comments on commit 2d13a8d

Please sign in to comment.