From 299bb508335e0118da68e9219800098b19728d9b Mon Sep 17 00:00:00 2001
From: Fisher
Date: Tue, 9 May 2023 14:43:46 +0800
Subject: [PATCH] Add reduce test using new test helper (#1379)

* Add reduce test using new test helper

* Fix output shape error when numel = 1

* Add cast op on paddle reduce_sum when dtype is int32

* Fix reduce result error when keepdim = True

---
 cinn/frontend/net_builder.cc             |   4 +-
 cinn/hlir/pe/ir_schedule_pe.cc           |   8 +-
 python/tests/ops/test_reduce_op_new.py   | 189 +++++++++++++++++++++++
 python/tests/ops/test_reduce_op_other.py |  87 +++++++++++
 4 files changed, 286 insertions(+), 2 deletions(-)
 create mode 100644 python/tests/ops/test_reduce_op_new.py
 create mode 100644 python/tests/ops/test_reduce_op_other.py

diff --git a/cinn/frontend/net_builder.cc b/cinn/frontend/net_builder.cc
index 53563ca8f2..8b62d25a5a 100644
--- a/cinn/frontend/net_builder.cc
+++ b/cinn/frontend/net_builder.cc
@@ -23,6 +23,7 @@
 #include "cinn/runtime/flags.h"
 #include "cinn/utils/functional.h"
 #include "cinn/utils/profiler.h"
+#include "glog/logging.h"
 
 namespace cinn {
 namespace frontend {
@@ -110,7 +111,8 @@ Variable NetBuilder::Reduce(const std::string& op_type, const Variable& x, const
     if (keep_dim) {
       return Identity(x);
     } else {
-      int new_rank = dim.empty() ? 1 : x->shape.size() - dim.size() + 1;
+      CHECK_GE(x->shape.size(), dim.size()) << "The rank of the input should be greater than or equal to the number of reduce axes.";
+      int new_rank = x->shape.size() == dim.size() ? 1 : x->shape.size() - dim.size();
       std::vector<int> new_shape(new_rank, 1);
       return Reshape(x, new_shape);
     }
diff --git a/cinn/hlir/pe/ir_schedule_pe.cc b/cinn/hlir/pe/ir_schedule_pe.cc
index 8621aa2a38..bf75ffe1b8 100644
--- a/cinn/hlir/pe/ir_schedule_pe.cc
+++ b/cinn/hlir/pe/ir_schedule_pe.cc
@@ -31,6 +31,7 @@
 #include "cinn/ir/ir_base.h"
 #include "cinn/optim/ir_simplify.h"
 #include "cinn/poly/isl_utils.h"
+#include "cinn/utils/string.h"
 
 namespace cinn {
 namespace hlir {
@@ -462,7 +463,12 @@ void IRCudaScheduleBlockReduce(ir::IRSchedule &ir_sch,
     }
   }
 
-  if (tmp_out->shape.size() == 1) {
+  // Special handling for keepdim = True in reduce stage 1: the shape size may not be 1, but the loops still need to
+  // be split; otherwise reads and writes of the same data will conflict.
+  int numel = std::accumulate(tmp_out->shape.begin(), tmp_out->shape.end(), 1, [](const int &num, const ir::Expr &e) {
+    return num * e.as_int32();
+  });
+  if (tmp_out->shape.size() == 1 || (numel == tmp_out->shape.back().as_int32())) {
     CHECK_EQ(out->shape[0], Expr(1));
 
     // block and root
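A quick NumPy reference for the new_rank rule above — a minimal sketch, illustrative only and not part of the diff: when every input dim is 1, the reduction degenerates into a reshape, so the output rank has to drop by the number of reduced axes instead of the old rank - len(axes) + 1.

    import numpy as np

    x = np.zeros([1, 1, 1, 1, 1], dtype=np.float32)
    # Partial reduce: rank drops from 5 to 4, i.e. new_rank = rank - len(axes).
    assert x.sum(axis=(3,), keepdims=False).shape == (1, 1, 1, 1)
    # Full reduce: NumPy returns a 0-d scalar; CINN instead keeps a rank-1 [1]
    # tensor, which is why new_rank = 1 when shape.size() == dim.size().
    assert x.sum(axis=(0, 1, 2, 3, 4), keepdims=False).shape == ()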
diff --git a/python/tests/ops/test_reduce_op_new.py b/python/tests/ops/test_reduce_op_new.py
new file mode 100644
index 0000000000..af01ce28fc
--- /dev/null
+++ b/python/tests/ops/test_reduce_op_new.py
@@ -0,0 +1,189 @@
+# Copyright (c) 2023 CINN Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
+import paddle
+import cinn
+from cinn.frontend import *
+from cinn.common import *
+
+
+@OpTestTool.skip_if(not is_compiled_with_cuda(),
+                    "x86 test will be skipped due to timeout.")
+class TestReduceOp(OpTest):
+    def setUp(self):
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.prepare_inputs()
+
+    def prepare_inputs(self):
+        self.x_np = self.random(
+            shape=self.case["shape"], dtype=self.case["dtype"])
+
+    def build_paddle_program(self, target):
+        x = paddle.to_tensor(self.x_np, stop_gradient=True)
+        if self.case["op_type"] == "sum":
+            out = paddle.sum(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+            if self.case["dtype"] == "int32":
+                # paddle.sum promotes int32 to int64; cast back to match CINN.
+                out = out.cast(self.case["dtype"])
+        elif self.case["op_type"] == "prod":
+            out = paddle.prod(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+        elif self.case["op_type"] == "max":
+            out = paddle.max(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+        elif self.case["op_type"] == "min":
+            out = paddle.min(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+        elif self.case["op_type"] == "all":
+            out = paddle.all(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+        elif self.case["op_type"] == "any":
+            out = paddle.any(
+                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+        else:
+            out = paddle.assign(x)
+        self.paddle_outputs = [out]
+
+    def build_cinn_program(self, target):
+        builder = NetBuilder("reduce")
+        x = builder.create_input(
+            self.nptype2cinntype(self.case["dtype"]), self.case["shape"], "x")
+        if self.case["op_type"] == "sum":
+            out = builder.reduce_sum(x, self.case["axis"],
+                                     self.case["keepdim"])
+        elif self.case["op_type"] == "prod":
+            out = builder.reduce_prod(x, self.case["axis"],
+                                      self.case["keepdim"])
+        elif self.case["op_type"] == "max":
+            out = builder.reduce_max(x, self.case["axis"],
+                                     self.case["keepdim"])
+        elif self.case["op_type"] == "min":
+            out = builder.reduce_min(x, self.case["axis"],
+                                     self.case["keepdim"])
+        elif self.case["op_type"] == "all":
+            out = builder.reduce_all(x, self.case["axis"],
+                                     self.case["keepdim"])
+        elif self.case["op_type"] == "any":
+            out = builder.reduce_any(x, self.case["axis"],
+                                     self.case["keepdim"])
+        else:
+            out = builder.identity(x)
+        prog = builder.build()
+        res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
+        self.cinn_outputs = res
+
+    def test_check_results(self):
+        max_relative_error = self.case.get("max_relative_error", 1e-5)
+        self.check_outputs_and_grads(max_relative_error=max_relative_error)
+
+
+class TestReduceAll(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestReduceOpCase"
+        self.cls = TestReduceOp
+        self.inputs = [
+            {
+                "shape": [1],
+                "axis": [-1],
+            },
+            {
+                "shape": [1024],
+                "axis": [0],
+            },
+            {
+                "shape": [512, 256],
+                "axis": [1],
+            },
+            {
+                "shape": [128, 64, 32],
+                "axis": [2],
+            },
+            {
+                "shape": [16, 8, 4, 2],
+                "axis": [3],
+            },
+            {
+                "shape": [16, 8, 4, 2, 1],
+                "axis": [3],
+            },
+            {
+                "shape": [1, 1, 1, 1, 1],
+                "axis": [3],
+            },
+        ]
+        self.dtypes = [
+            # Paddle reduce does not support int16
+            # {
+            #     "dtype": "int16",
+            # },
+            {
+                "dtype": "int32",
+            },
+            {
+                "dtype": "int64",
+            },
+            # Paddle reduce does not support float16
+            # {
+            #     "dtype": "float16",
+            # },
+            {
+                "dtype": "float32",
+            },
+            {
+                "dtype": "float64",
+            },
+        ]
+        self.attrs = [
+            {
+                "op_type": "sum",
+                "keepdim": True
+            },
+            {
+                "op_type": "sum",
+                "keepdim": False
+            },
+            {
+                "op_type": "prod",
+                "keepdim": True
+            },
+            {
+                "op_type": "prod",
+                "keepdim": False
+            },
+            {
+                "op_type": "max",
+                "keepdim": True
+            },
+            {
+                "op_type": "max",
+                "keepdim": False
+            },
+            {
+                "op_type": "min",
+                "keepdim": True
+            },
+            {
+                "op_type": "min",
+                "keepdim": False
+            },
+        ]
+
+
+if __name__ == "__main__":
+    TestReduceAll().run()
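The int32 cast in build_paddle_program above compensates for Paddle's dtype promotion: in the Paddle 2.x releases this test targets, paddle.sum returns int64 for bool and int32 inputs, while CINN's reduce_sum keeps the input dtype. A minimal repro, illustrative only and not part of the diff:

    import paddle

    x = paddle.ones([4], dtype="int32")
    print(paddle.sum(x).dtype)                # paddle.int64 (promoted by Paddle)
    print(paddle.sum(x).cast("int32").dtype)  # paddle.int32, comparable to CINN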
diff --git a/python/tests/ops/test_reduce_op_other.py b/python/tests/ops/test_reduce_op_other.py
new file mode 100644
index 0000000000..1f54dd3b20
--- /dev/null
+++ b/python/tests/ops/test_reduce_op_other.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2023 CINN Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from test_reduce_op_new import TestReduceAll
+
+
+class TestReduceForBool(TestReduceAll):
+    def init_attrs(self):
+        super().init_attrs()
+        self.dtypes = [{"dtype": "bool"}]
+        self.attrs = [
+            {
+                "op_type": "all",
+                "keepdim": True
+            },
+            {
+                "op_type": "all",
+                "keepdim": False
+            },
+            {
+                "op_type": "any",
+                "keepdim": True
+            },
+            {
+                "op_type": "any",
+                "keepdim": False
+            },
+        ]
+
+
+class TestReduceAxis(TestReduceAll):
+    def init_attrs(self):
+        super().init_attrs()
+        self.inputs = [
+            {
+                "shape": [1, 512, 1],
+                "axis": [1],
+            },
+            {
+                "shape": [1, 1024, 1],
+                "axis": [1],
+            },
+            {
+                "shape": [1, 2048, 1],
+                "axis": [1],
+            },
+            {
+                "shape": [64, 32, 16, 8, 4],
+                "axis": [0, 2],
+            },
+            {
+                "shape": [64, 32, 16, 8, 4],
+                "axis": [1, 2, 3],
+            },
+            {
+                # An empty axis list reduces over all dimensions.
+                "shape": [64, 32, 16, 8, 4],
+                "axis": [],
+            },
+        ]
+        self.dtypes = [{"dtype": "float32"}]
+        self.attrs = [
+            {
+                "op_type": "sum",
+                "keepdim": True,
+            },
+            {
+                "op_type": "sum",
+                "keepdim": False,
+            },
+        ]
+
+
+if __name__ == "__main__":
+    TestReduceForBool().run()
+    TestReduceAxis().run()
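The TestReduceAxis cases above (shapes like [1, 512, 1] reduced over axis 1 with keepdim=True) exercise the IRCudaScheduleBlockReduce change: with keepdim=True the stage-1 temporary keeps its rank, so the old `shape.size() == 1` test missed tensors that still hold effectively one-dimensional data. A minimal Python sketch of the new predicate — the helper name and example shapes are mine, not from the patch:

    from math import prod

    def effectively_one_dim(shape):
        # Mirrors the C++ check: shape.size() == 1 || numel == shape.back().
        return len(shape) == 1 or prod(shape) == shape[-1]

    assert effectively_one_dim([128])          # rank-1: the original fast path
    assert effectively_one_dim([1, 1, 128])    # keepdim keeps leading 1s
    assert not effectively_one_dim([2, 128])   # genuinely 2-D: other schedule path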