From 7b8a28f507628cb6f85ffb028a65f4dbbe235d7b Mon Sep 17 00:00:00 2001
From: Brian Pickrell <95253842+bpickrel@users.noreply.github.com>
Date: Tue, 29 Aug 2023 11:25:35 -0700
Subject: [PATCH 1/2] Fix dyn pooling (#1768)

Adds support for dynamic input shapes in the pooling operator, along with
auto padding. With this combination, the padding (and therefore the output
shape) cannot be computed until runtime.
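An illustrative use of the new behavior, adapted from the avgpool_dyn_auto_pad_test
reference test added below (the shape and attribute values here are examples only):

    migraphx::program p;
    auto* mm = p.get_main_module();
    // spatial dims are dynamic; concrete extents arrive at evaluation time
    auto s = migraphx::shape{migraphx::shape::float_type, {{1, 4}, {3, 3}, {2, 6}, {2, 6}}};
    auto x = mm->add_parameter("X", s);
    mm->add_instruction(
        migraphx::make_op("pooling",
                          {{"mode", migraphx::op::pooling_mode::average},
                           {"lengths", {2, 2}},
                           {"stride", {1, 1}},
                           // defer padding (and output shape) calculation to run time
                           {"padding_mode", migraphx::op::padding_mode_t::same_upper}}),
        x);
    p.compile(migraphx::make_target("ref"));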
---
 src/include/migraphx/op/common.hpp            |   8 +-
 src/include/migraphx/op/convolution.hpp       |   4 +-
 src/include/migraphx/op/pooling.hpp           | 124 ++++++++++++++----
 src/include/migraphx/pad_calc.hpp             |  10 +-
 src/normalize_attributes.cpp                  |  35 +++--
 src/onnx/parse_pooling.cpp                    |  55 ++++----
 src/pad_calc.cpp                              |  41 +++++-
 .../averagepool_dyn_autopad_error_test.onnx   | Bin 216 -> 0 bytes
 test/onnx/averagepool_dyn_autopad_test.onnx   | Bin 0 -> 224 bytes
 test/onnx/averagepool_dyn_test.onnx           | Bin 161 -> 204 bytes
 test/onnx/gen_onnx.py                         |  26 ++--
 test/onnx/onnx_test.cpp                       |  60 ++++++---
 test/op_shape_test.cpp                        |   7 +
 test/ref_ops_test.cpp                         |  98 ++++++++++++--
 14 files changed, 363 insertions(+), 105 deletions(-)
 delete mode 100644 test/onnx/averagepool_dyn_autopad_error_test.onnx
 create mode 100644 test/onnx/averagepool_dyn_autopad_test.onnx

diff --git a/src/include/migraphx/op/common.hpp b/src/include/migraphx/op/common.hpp
index cb28b41ff24..e6b85f19e23 100644
--- a/src/include/migraphx/op/common.hpp
+++ b/src/include/migraphx/op/common.hpp
@@ -1,7 +1,7 @@
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -33,8 +33,12 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
+// Specifies where to add the "extra" cell of padding if the
+// calculated padding is an odd number.
 // Padding mode is default_ for fixed shape padding.
-// same_lower and same_upper used for dynamic padding.
+// same_lower and same_upper specify dynamic padding.
+// The odd cell goes at the beginning of the dimension
+// (same_lower) or the end (same_upper).
 enum padding_mode_t
 {
     default_, // NOLINT
diff --git a/src/include/migraphx/op/convolution.hpp b/src/include/migraphx/op/convolution.hpp
index daa7d055169..ce2f157eabd 100644
--- a/src/include/migraphx/op/convolution.hpp
+++ b/src/include/migraphx/op/convolution.hpp
@@ -1,7 +1,7 @@
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -206,6 +206,7 @@ struct convolution
         std::vector<std::size_t> new_padding;
         if(padding_mode != op::padding_mode_t::default_)
         {
+            // Auto-calculate the padding sizes with calc_dyn_auto_pad
             auto input_lens   = args[0].get_shape().lens();
             auto weights_lens = args[1].get_shape().lens();
             new_padding =
@@ -217,6 +218,7 @@ struct convolution
         }
         else
         {
+            // Use the padding that was given
             new_padding = padding;
             if(output_shape.dynamic())
             {
diff --git a/src/include/migraphx/op/pooling.hpp b/src/include/migraphx/op/pooling.hpp
index 3d31e5d9181..684d539e32b 100644
--- a/src/include/migraphx/op/pooling.hpp
+++ b/src/include/migraphx/op/pooling.hpp
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <migraphx/pad_calc.hpp>
 #include
 #include
 #include
@@ -40,10 +41,20 @@ namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
 namespace op {
 
+// The Pooling operator mostly follows the specifications for the Onnx pooling op.
+// It assumes an NCHW layout, extended to support any number of spatial dimensions
+// from 1 on up; dimensions are (n_batches, n_channels, spatial dims...)
+//
 struct pooling
 {
+    // Class members mode, ceil_mode, and padding_mode have similar names but refer to
+    // separate concepts.
     pooling_mode mode = {pooling_mode::average};
 
+    // If the input has a rank other than 4, then padding, stride, and lengths must all be
+    // specified, since the defaults are 2-dimensional. Exception: padding is not required
+    // if padding_mode != default_.
+
     // Padding along each spatial input dimension
     // Can be ndim or 2*ndim values where ndim is size of lengths
     // ndim values means pad the same before and after each dimension
@@ -63,13 +74,14 @@ struct pooling
 
     // ceiling mode is a flag affecting output size
    // or equivalently, placements of the pooling kernel.
-    // When true, round the size upwards, possibly
-    // including partial placements where the kernel extends beyond the edge
-    // of input and even padding. When false, round down so that all
+    // When true, round the size upwards. When false, round down so that all
     // kernel placements fit but some input values may be dropped.
     bool ceil_mode = false;
     int lp_order   = 2;
 
+    // Mode for auto padding. default_ indicates no auto padding.
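+    // same_lower / same_upper compute ONNX-style "SAME" auto padding from the
+    // runtime input shape; any value in the padding attribute above is then ignored.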
+    padding_mode_t padding_mode = padding_mode_t::default_;
+
     // Global pooling with dynamic shape input
     bool dyn_global = false;
 
@@ -84,6 +96,7 @@ struct pooling
     {
         return pack(f(self.mode, "mode"),
                     f(self.padding, "padding"),
+                    f(self.padding_mode, "padding_mode"),
                     f(self.stride, "stride"),
                     f(self.lengths, "lengths"),
                     f(self.ceil_mode, "ceil_mode"),
@@ -97,7 +110,8 @@ struct pooling
     {
         if(dyn_global)
             return;
-        if((padding.size() != stride.size() and (padding.size()) != stride.size() * 2) or
+        if((padding_mode == default_ and padding.size() != stride.size() and
+            (padding.size()) != stride.size() * 2) or
            stride.size() != lengths.size())
        {
             MIGRAPHX_THROW("POOLING: inconsistent attribute sizes");
@@ -137,8 +151,19 @@ struct pooling
             std::size_t padding_factor = 2 * padding[i];
             if(padding.size() == 2 * kdims)
                 padding_factor = padding[i] + padding[i + kdims];
-            assert(input_lens[i + 2] + padding_factor >= lengths[i]);
-            std::size_t dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+            std::size_t dim_size;
+            if(input_lens[i + 2] + padding_factor < lengths[i])
+            {
+                if(padding_mode == default_)
+                    MIGRAPHX_THROW("POOLING: not enough padding for the given kernel size");
+                // lengths can be legitimately larger only if we're doing auto padding
+                // with a dynamic shape, in which case the given padding is ignored.
+                // Set a dummy value.
+                dim_size = 2;
+            }
+            else
+            {
+                dim_size = input_lens[i + 2] + padding_factor - lengths[i];
+            }
             std::size_t len =
                 (ceil_mode)
                     ? dim_size / stride[i] +
@@ -151,17 +176,13 @@ struct pooling
 
     shape normalize_compute_shape(std::vector<shape> inputs) const
     {
-        check_shapes{inputs, *this, true}.has(1);
+        check_shapes{inputs, *this, true}.has(1).min_ndims(3);
         check_attribute_size();
         const shape& input = inputs.at(0);
-        auto padding_size  = padding.size();
+        auto stride_size   = stride.size();
         size_t kdims       = input.ndim() - 2;
-        if(input.ndim() < 3)
-        {
-            MIGRAPHX_THROW("POOLING: input must have 3 or more dimensions and be nonempty");
-        }
-        if(input.ndim() * 2 != padding_size + 4 and input.ndim() != padding_size + 2)
+        if(input.ndim() != stride_size + 2)
         {
             MIGRAPHX_THROW("POOLING: input and attribute size mismatch!");
         }
@@ -179,6 +200,28 @@ struct pooling
             }
             return {input.type(), output_dyn_dims};
         }
+        else if(padding_mode != default_)
+        {
+            const size_t num_spatial_dims = inputs[0].ndim() - 2;
+            const shape& x_shape          = inputs[0];
+            // same as convolution::dynamic_compute_shape()
+
+            for(std::size_t i = 0; i < num_spatial_dims; ++i)
+            {
+                auto ceil_div = [](std::size_t x, std::size_t y) { return (x + y - 1) / y; };
+                auto s        = stride[i];
+
+                auto x = x_shape.dyn_dims()[i + 2];
+                std::set<std::size_t> optimals{};
+                std::transform(x.optimals.begin(),
+                               x.optimals.end(),
+                               std::inserter(optimals, optimals.begin()),
+                               [&](auto o) { return ceil_div(o, s); });
+                output_dyn_dims.push_back(
+                    shape::dynamic_dimension{ceil_div(x.min, s), ceil_div(x.max, s), optimals});
+            }
+            return {input.type(), output_dyn_dims};
+        }
         else
         {
             // does not compute optimals
@@ -267,6 +310,7 @@ struct pooling
                       Out& output,
                       const In& input,
                       const std::vector<std::size_t>& kernel_dims,
+                      const std::vector<std::size_t>& padding_vals,
                       Op op) const
     {
         auto in_s = input.get_shape();
@@ -283,9 +327,9 @@ struct pooling
             // For each spatial dimension, find starting and ending index of pooling kernel
             for(std::size_t dim = 2; dim < n_dim; ++dim)
             {
-                auto d_2 = dim - 2;
-                int start =
-                    static_cast<int>(idx_o[dim] * stride[d_2]) - static_cast<int>(padding[d_2]);
+                auto d_2  = dim - 2;
+                int start = static_cast<int>(idx_o[dim] * stride[d_2]) -
+                            static_cast<int>(padding_vals[d_2]);
                 int end; // NOLINT
                 if(count_include_pad and ceil_mode and (mode != pooling_mode::max))
@@ -297,7 +341,7 @@ struct pooling
                     // Check if this kernel extends beyond the padding at end of dimension
                     end = std::min(start + kernel_dims[d_2],
-                                   in_lens[dim] + static_cast<int>(padding[d_2]));
+                                   in_lens[dim] + static_cast<int>(padding_vals[d_2]));
                 }
                 else
                 {
@@ -316,6 +360,7 @@ struct pooling
             }
 
             shape win_shape{output_shape.type(), win_size};
+            auto pool_size = win_shape.elements();
 
             double output_val = op.template init<Type>();
@@ -354,30 +399,65 @@ struct pooling
 
     argument compute(const dyn_output& dyn_out, std::vector<argument> args) const
     {
-        argument result{dyn_out.computed_shape};
+        argument result;
         auto input_lens = args[0].get_shape().lens();
         std::vector<std::size_t> kernel_dims;
+        shape output_shape;
+        // If we have to auto-calculate padding, it will be passed to calc_pooling() as an
+        // argument instead of the member variable padding.
+        std::vector<std::size_t> temp_padding(padding);
         if(dyn_global)
         {
+            // for dynamic GlobalPooling, there's no padding
             kernel_dims.insert(kernel_dims.end(), input_lens.begin() + 2, input_lens.end());
+            output_shape = dyn_out.computed_shape;
+            result       = dyn_out.computed_shape;
         }
-        else
+        else if((padding_mode != op::padding_mode_t::default_))
         {
+            // if padding_mode is set, the input had a dynamic shape; calculate the padding now
+
+            // kernel_lens is the same as kernel_dims, but prepended with the 2 non-
+            // spatial dimensions. For size computations, it's used like the weights
+            // tensor for convolutions.
+            std::vector<std::size_t> kernel_lens;
+            kernel_lens.insert(kernel_lens.end(), input_lens.begin(), input_lens.begin() + 2);
+            kernel_lens.insert(kernel_lens.end(), lengths.begin(), lengths.end());
             kernel_dims = this->lengths;
+
+            auto type = args[0].get_shape().type();
+            // dilation not currently supported for pooling, so default to all 1's
+            // (one per spatial dimension)
+            temp_padding = calc_dyn_auto_pad(input_lens,
+                                             kernel_lens,
+                                             stride,
+                                             std::vector<std::size_t>(kernel_dims.size(), 1),
+                                             bool(padding_mode == op::same_upper));
+
+            output_shape =
+                compute_padded_pool_shape(args[0].get_shape(),
+                                          shape(type, kernel_dims),
+                                          temp_padding,
+                                          stride,
+                                          std::vector<std::size_t>(kernel_dims.size(), 1));
+
+            result = argument(output_shape);
+        }
+        else // fixed/static input
+        {
+            kernel_dims  = this->lengths;
+            output_shape = dyn_out.computed_shape;
+            result       = dyn_out.computed_shape;
         }
+
+        // Perform the computation and populate result
         visit_all(result, args[0])([&](auto output, auto input) {
             using type = typename decltype(output)::value_type;
             switch(mode)
             {
             case migraphx::op::pooling_mode::average:
-                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, avg_pool{});
+                calc_pooling<type>(
+                    output_shape, output, input, kernel_dims, temp_padding, avg_pool{});
                 break;
             case migraphx::op::pooling_mode::max:
-                calc_pooling<type>(dyn_out.computed_shape, output, input, kernel_dims, max_pool{});
+                calc_pooling<type>(
+                    output_shape, output, input, kernel_dims, temp_padding, max_pool{});
                 break;
             case migraphx::op::pooling_mode::lpnorm:
                 calc_pooling<type>(
-                    dyn_out.computed_shape, output, input, kernel_dims, lpnorm_pool{lp_order});
+                    output_shape, output, input, kernel_dims, temp_padding, lpnorm_pool{lp_order});
                 break;
             }
         });
diff --git a/src/include/migraphx/pad_calc.hpp b/src/include/migraphx/pad_calc.hpp
index 06c209f6073..a17c0bc3028 100644
--- a/src/include/migraphx/pad_calc.hpp
+++ b/src/include/migraphx/pad_calc.hpp
@@ -1,7 +1,7 @@
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -62,6 +62,14 @@ shape compute_padded_shape(const shape& input,
                            const std::vector<std::size_t>& stride,
                            const std::vector<std::size_t>& dilation);
 
+// Used for dynamic auto padding of pooling operators where padding needs to be computed at
+// evaluation time.
+shape compute_padded_pool_shape(const shape& input,
+                                const shape& kernel,
+                                const std::vector<std::size_t>& padding,
+                                const std::vector<std::size_t>& stride,
+                                const std::vector<std::size_t>& dilation);
+
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
diff --git a/src/normalize_attributes.cpp b/src/normalize_attributes.cpp
index cea38402b81..6402d4ee2ec 100644
--- a/src/normalize_attributes.cpp
+++ b/src/normalize_attributes.cpp
@@ -26,7 +26,7 @@
 #include
 #include
 #include
-
+#include <migraphx/op/common.hpp>
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -192,20 +192,27 @@ bool normalize_attributes(operation& op, const shape& input_shape)
     auto val = op.to_value();
     if(attrs.contains("normalize_padding"))
     {
-        auto padding       = val.at(attrs.at("normalize_padding").to<std::string>());
-        auto padding_size  = padding.size();
-        auto padding_start = 2;
-
-        if(padding_size == 2 * (input_shape.ndim() - padding_start))
-            tuned = true;
-        else if(padding_size != (input_shape.ndim() - padding_start))
-            MIGRAPHX_THROW("inconsistent padding size");
-        else
+        bool use_auto_padding =
+            (val.contains("padding_mode") and
+             (val.at("padding_mode").to<int>() != migraphx::op::padding_mode_t::default_));
+        if(not use_auto_padding)
         {
-            auto result    = tune_pad_attribute(padding);
-            val["padding"] = result;
-            op.from_value(val);
-            tuned = true;
+            auto padding       = val.at(attrs.at("normalize_padding").to<std::string>());
+            auto padding_size  = padding.size();
+            auto padding_start = 2;
+            if(padding_size == 2 * (input_shape.ndim() - padding_start))
+                tuned = true;
+            else if(padding_size != (input_shape.ndim() - padding_start))
+            {
+                MIGRAPHX_THROW("normalize_attributes: inconsistent padding vector size");
+            }
+            else
+            {
+                auto result    = tune_pad_attribute(padding);
+                val["padding"] = result;
+                op.from_value(val);
+                tuned = true;
+            }
         }
     }
     if(not attrs.contains("normalize_axes"))
diff --git a/src/onnx/parse_pooling.cpp b/src/onnx/parse_pooling.cpp
index 556d3297061..4a9cb35c875 100644
--- a/src/onnx/parse_pooling.cpp
+++ b/src/onnx/parse_pooling.cpp
@@ -1,7 +1,7 @@
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -151,26 +151,6 @@ struct parse_pooling : op_parser<parse_pooling>
                 kdims, paddings.size() / 2, "PARSE_POOLING: inconsistent explicit paddings");
         }
 
-        if(contains(info.attributes, "auto_pad"))
-        {
-            if(in_shape.dynamic())
-            {
-                MIGRAPHX_THROW(
-                    "PARSE_POOLING: Auto padding pooling with dynamic input shape not supported");
-            }
-            else
-            {
-                values["padding"].clear();
-                // return paddings could be empty, then setting to 0 for no padding
-                cal_auto_padding_size(info,
-                                      values,
-                                      values["lengths"].to_vector<std::size_t>(),
-                                      {1, 1},
-                                      in_shape.lens(),
-                                      paddings);
-            }
-        }
-
         if(paddings.size() != 2 * kdims)
         {
             paddings.resize(kdims * 2);
@@ -192,6 +172,36 @@ struct parse_pooling : op_parser<parse_pooling>
         // used to calculate the supposed output shape
         std::vector<int64_t> orig_padding = paddings;
 
+        // TODO: add parsing for dilations
+        if(contains(info.attributes, "auto_pad") and
+           to_upper(info.attributes["auto_pad"].s()) != "NOTSET")
+        {
+            auto auto_pad = to_upper(info.attributes["auto_pad"].s());
+            // don't use the given padding sizes, if any
+            // values["padding"].clear();
+            if(in_shape.dynamic())
+            {
+                // set padding_mode to trigger auto padding at runtime
+                bool is_same_upper = (auto_pad.find("SAME_UPPER") != std::string::npos);
+                values["padding_mode"] = is_same_upper
+                                             ? to_value(op::padding_mode_t::same_upper)
+                                             : to_value(op::padding_mode_t::same_lower);
+            }
+            else
+            {
+                // Calculate auto padding
+                // dilations (argument 4) not supported; default to all 1's
+                cal_auto_padding_size(info,
+                                      values,
+                                      values["lengths"].to_vector<std::size_t>(),
+                                      std::vector<std::size_t>(in_shape.ndim() - 2, 1),
+                                      in_shape.lens(),
+                                      paddings);
+                values["padding"] = paddings;
+                // default padding_mode indicates that padding sizes are not calculated
+                // dynamically
+                values["padding_mode"] = migraphx::op::padding_mode_t::default_;
+            }
+        }
+
         std::vector<int64_t> slice_start;
         std::vector<int64_t> slice_end;
         tune_padding_size(values, paddings, count_include_pad, slice_start);
@@ -208,8 +218,9 @@ struct parse_pooling : op_parser<parse_pooling>
             orig_padding.insert(orig_padding.begin(), 2, 0);
             op::pad pad{orig_padding, 0.0f};
             shape padded_shape = pad.compute_shape({l0->get_shape()});
-            auto out_lens      = make_op("pooling", values).compute_shape({padded_shape}).lens();
 
+            // make an op just to get its output shape
+            auto out_lens = make_op("pooling", values).compute_shape({padded_shape}).lens();
             // compute slice_end information
             slice_end.resize(slice_start.size());
             std::transform(out_lens.begin() + 2,
diff --git a/src/pad_calc.cpp b/src/pad_calc.cpp
index 5662dfb4000..3fe9603aa45 100644
--- a/src/pad_calc.cpp
+++ b/src/pad_calc.cpp
@@ -1,7 +1,7 @@
 /*
  * The MIT License (MIT)
  *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -52,6 +52,11 @@ void calculate_padding(int64_t idx,
     }
 }
 
+/**
+ * Given the input array dimensions, kernel sizes (wei_lens), strides, and dilations,
+ * calculate the auto-padding amount for each spatial dimension.
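+ *
+ * Example with SAME_UPPER and one spatial dimension: input length 5, kernel 3,
+ * stride 2, dilation 1 gives output length ceil(5 / 2) = 3, so the total
+ * padding is (3 - 1) * 2 + 3 - 5 = 2, split as {1, 1}. An odd total puts the
+ * extra cell at the end for same_upper, or at the beginning for same_lower.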
+ *
+ */
 std::vector<std::size_t> calc_dyn_auto_pad(const std::vector<std::size_t>& input_lens,
                                            const std::vector<std::size_t>& wei_lens,
                                            const std::vector<std::size_t>& strides,
@@ -60,6 +65,7 @@ std::vector<std::size_t> calc_dyn_auto_pad(const std::vector<std::size_t>& input
 {
     std::vector<std::size_t> padding;
     assert(input_lens.size() >= 3);
+    assert(input_lens.size() == wei_lens.size());
     std::size_t num_spatial_dims = input_lens.size() - 2;
     padding.resize(2 * num_spatial_dims);
     for(std::size_t i = 0; i < num_spatial_dims; i++)
@@ -88,6 +94,11 @@ std::vector<std::size_t> calc_dyn_auto_pad(const std::vector<std::size_t>& input
     return padding;
 }
 
+/**
+ * Calculate the correct output shape for a convolution with
+ * a given input size and other parameters.
+ *
+ */
 shape compute_padded_shape(const shape& input,
                            const shape& weights,
                            const std::vector<std::size_t>& padding,
@@ -111,5 +122,33 @@ shape compute_padded_shape(const shape& input,
     return input.with_lens(output_lens);
 }
 
+/**
+ * Calculate the correct output shape for a pooling with
+ * a given input size and other parameters. This uses
+ * the same formula for pooling that compute_padded_shape() uses
+ * for convolutions, but takes slightly different inputs.
+ *
+ */
+shape compute_padded_pool_shape(const shape& input,
+                                const shape& kernel,
+                                const std::vector<std::size_t>& padding,
+                                const std::vector<std::size_t>& stride,
+                                const std::vector<std::size_t>& dilation)
+{
+    const size_t num_spatial_dims = input.lens().size() - 2;
+
+    std::vector<std::size_t> output_lens{input.lens()[0], input.lens()[1]};
+    // calculate the output shape of the pooling: ((W - K + 2P) / S) + 1
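+    // e.g. W = 5, K = 3, total padding 2P = 2, S = 1, dilation 1:
+    //      ((5 - 3 + 2) / 1) + 1 = 5 output elements (input size preserved)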
+    for(size_t i = 0; i < num_spatial_dims; ++i)
+    {
+        auto padding_factor = padding[i] + padding[i + num_spatial_dims];
+        output_lens.push_back(std::size_t(std::max<std::ptrdiff_t>(
+            1,
+            (input.lens()[i + 2] - (1 + dilation[i] * (kernel.lens()[i] - 1)) + padding_factor) /
+                    stride[i] +
+                1)));
+    }
+    return input.with_lens(output_lens);
+}
 } // namespace MIGRAPHX_INLINE_NS
 } // namespace migraphx
diff --git a/test/onnx/averagepool_dyn_autopad_error_test.onnx b/test/onnx/averagepool_dyn_autopad_error_test.onnx
deleted file mode 100644
index 524c08963998b6c83878140961a39590c3ed3aae..0000000000000000000000000000000000000000
GIT binary patch
[binary data (216 -> 0 bytes) not included]
diff --git a/test/onnx/averagepool_dyn_autopad_test.onnx b/test/onnx/averagepool_dyn_autopad_test.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..248ae610114641114941ade5de89b560fc889749
GIT binary patch
[binary data (0 -> 224 bytes) not included]
diff --git a/test/onnx/averagepool_dyn_test.onnx b/test/onnx/averagepool_dyn_test.onnx
[binary data (161 -> 204 bytes) not included]
diff --git a/test/onnx/gen_onnx.py b/test/onnx/gen_onnx.py
[patch content not recovered]
diff --git a/test/onnx/onnx_test.cpp b/test/onnx/onnx_test.cpp
--- a/test/onnx/onnx_test.cpp
+++ b/test/onnx/onnx_test.cpp
@@ -?,? +?,? @@ TEST_CASE(averagepool_dyn_test)
     auto l0  = mm->add_parameter(
         "0", {migraphx::shape::float_type, {{1, 4}, {3, 3}, {5, 5}, {5, 5}, {5, 5}}});
-    auto ret = mm->add_instruction(migraphx::make_op("pooling",
-                                                     {{"mode", migraphx::op::pooling_mode::average},
-                                                      {"padding", {0, 0, 0, 0, 0, 0}},
-                                                      {"stride", {1, 1, 1}},
-                                                      {"lengths", {3, 3, 3}}}),
-                                   l0);
+    auto ret =
+        mm->add_instruction(migraphx::make_op("pooling",
+                                              {
+                                                  {"mode", migraphx::op::pooling_mode::average},
+                                                  {"stride", {2, 2, 2}},
+                                                  {"lengths", {3, 3, 3}},
+                                                  {"padding", {1, 1, 1, 1, 1, 1}},
+                                                  {"padding_mode", 0},
+                                              }),
+                            l0);
     mm->add_return({ret});
 
     migraphx::onnx_options options;
@@ -310,12 +315,29 @@ TEST_CASE(averagepool_dyn_test)
     EXPECT(p == prog);
 }
 
-TEST_CASE(averagepool_dyn_autopad_error_test)
+TEST_CASE(averagepool_dyn_autopad_test)
 {
+    // Pooling with dynamic input and auto padding. Default padding values will be overridden.
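+    // Expected op: with padding_mode same_upper and stride {2, 2, 2}, output
+    // spatial dims are computed as ceil(input / stride), e.g. ceil(5 / 2) = 3 here.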
{"padding", {1}}, + {"stride", {1}}}), x); p.compile(migraphx::make_target("ref")); - std::vector data{1, 2, 3, 4, 5, 6}; + std::vector data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6}; + migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4}}; + migraphx::parameter_map params; + params["X"] = migraphx::argument(input_fixed_shape, data.data()); + auto result = p.eval(params).back(); + std::vector results_vector; + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + + std::vector gold{ + 0.3, 0.25, 0.3, 0.25, 0.1, 0.8, 0.65, 0.7, 0.5, 0.1, 0.1, 0.4, 0.4, 0.35, 0.6}; + EXPECT(migraphx::verify::verify_range(results_vector, gold)); +} + +TEST_CASE(avgpool_dyn_auto_pad_test) +{ + // Pooling with dynamic input, multidimensional kernel and auto-padding + migraphx::program p; + auto* mm = p.get_main_module(); + auto s = + migraphx::shape{migraphx::shape::float_type, {{1, 1}, {1, 3}, {2, 6, {2}}, {2, 6, {2}}}}; + auto x = mm->add_parameter("X", s); + mm->add_instruction( + migraphx::make_op("pooling", + { + {"mode", migraphx::op::pooling_mode::average}, + {"dyn_global", false}, + // non-default auto padding + {"padding_mode", migraphx::op::padding_mode_t::same_upper}, + {"lengths", {2, 3}}, + }), + x); + p.compile(migraphx::make_target("ref")); + + std::vector data{1, 2, 3, 4}; - // * * * - // 1 2 3 padding will look like this - // 4 5 6 The * are used when tiling the kernel - // * * * but are ignored in averaging + // * 1 2 * auto padding should look like this + // * 3 4 * + // * * * * - migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 1, 2, 3}}; + migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 1, 2, 2}}; migraphx::parameter_map params; params["X"] = migraphx::argument(input_fixed_shape, data.data()); auto result = p.eval(params).back(); std::vector results_vector(12); result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.5, 3.0, 4.5, 6.0}; + std::vector gold{2.5, 2.5, 3.5, 3.5}; + EXPECT(migraphx::verify::verify_range(results_vector, gold)); +} + +TEST_CASE(avgpool_dyn_auto_pad_1d_test) +{ + // Dynamic input with auto padding (== padding_mode specified) + migraphx::program p; + auto* mm = p.get_main_module(); + auto s = migraphx::shape{migraphx::shape::float_type, {{1, 3}, {3, 3}, {4, 4}}}; + auto x = mm->add_parameter("X", s); + mm->add_instruction( + migraphx::make_op("pooling", + {{"mode", migraphx::op::pooling_mode::average}, + {"lengths", {2}}, + // padding added will be {1, 0} to make output + // the same size as input + {"padding_mode", migraphx::op::padding_mode_t::same_lower}, + {"stride", {1}}}), + x); + p.compile(migraphx::make_target("ref")); + + std::vector data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6}; + migraphx::shape input_fixed_shape{migraphx::shape::float_type, {1, 3, 4}}; + migraphx::parameter_map params; + params["X"] = migraphx::argument(input_fixed_shape, data.data()); + auto result = p.eval(params).back(); + std::vector results_vector; + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + + // clang-format off + std::vector gold{0.3, 0.25, 0.3, 0.25, + 0.8, 0.65, 0.7, 0.5, + 0.1, 0.4, 0.4, 0.35}; + // clang-format on EXPECT(migraphx::verify::verify_range(results_vector, gold)); } @@ -1157,7 +1223,11 @@ TEST_CASE(conv_dyn_batch_test) auto input = mm->add_parameter("X", input_dyn_shape); auto weights = mm->add_parameter("W", weights_shape); - 
From d2486dcd0fca1bde4673496d2ee0d1c102f0b15f Mon Sep 17 00:00:00 2001
From: Paul Fultz II
Date: Tue, 29 Aug 2023 19:17:43 -0500
Subject: [PATCH 2/2] Check that migraphx_gpu is not using device compilation
 in migraphx_gpu (#2050)

---
 src/targets/gpu/CMakeLists.txt |  1 +
 src/targets/gpu/no_device.cpp  | 28 ++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)
 create mode 100644 src/targets/gpu/no_device.cpp

diff --git a/src/targets/gpu/CMakeLists.txt b/src/targets/gpu/CMakeLists.txt
index 32265887a98..0492f0680b2 100644
--- a/src/targets/gpu/CMakeLists.txt
+++ b/src/targets/gpu/CMakeLists.txt
@@ -123,6 +123,7 @@ add_library(migraphx_gpu
   lrn.cpp
   mlir.cpp
   multinomial.cpp
+  no_device.cpp
   nonzero.cpp
   pack_args.cpp
   pack_int8_args.cpp
diff --git a/src/targets/gpu/no_device.cpp b/src/targets/gpu/no_device.cpp
new file mode 100644
index 00000000000..a02d5254cb2
--- /dev/null
+++ b/src/targets/gpu/no_device.cpp
@@ -0,0 +1,28 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifdef __HIP_DEVICE_COMPILE__
+#error \
+    "Device compilation not allowed for migraphx_gpu. Do not link with hip::device. Device code should go into migraphx_device or migraphx_kernels"
+#endif