Add reduce test using new test helper (PaddlePaddle#1379)

* Add reduce test using new test helper

* Fix output shape error when numel = 1

Add cast op on paddle reduce_sum when dtype is int32

* Fix reduce result error when keepdim = True
FisherWY authored and jiahy0825 committed May 25, 2023
1 parent ee1f818 commit 299bb50
Showing 4 changed files with 286 additions and 2 deletions.
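
The int32 note in the commit message maps to the cast added in the new test's Paddle reference path: paddle.sum is documented to promote bool/int32 inputs to int64, while CINN's reduce_sum keeps the input dtype, so the reference output is cast back before comparison. A minimal illustration, assuming a working Paddle install (the shapes and values are arbitrary, not from the commit):

import numpy as np
import paddle

# Illustrative only: paddle.sum is expected to promote int32 inputs to int64,
# so the test's Paddle reference output is cast back to int32 to match CINN.
x = paddle.to_tensor(np.ones([4, 8], dtype="int32"))
out = paddle.sum(x, axis=[1], keepdim=False)
print(out.dtype)              # paddle.int64 (promoted)
out = out.cast("int32")       # align with the CINN reduce_sum output dtype
print(out.dtype, out.shape)   # paddle.int32 [4]
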
cinn/frontend/net_builder.cc (3 additions, 1 deletion)
@@ -23,6 +23,7 @@
#include "cinn/runtime/flags.h"
#include "cinn/utils/functional.h"
#include "cinn/utils/profiler.h"
#include "glog/logging.h"

namespace cinn {
namespace frontend {
@@ -110,7 +111,8 @@ Variable NetBuilder::Reduce(const std::string& op_type, const Variable& x, const
   if (keep_dim) {
     return Identity(x);
   } else {
-    int new_rank = dim.empty() ? 1 : x->shape.size() - dim.size() + 1;
+    CHECK_GE(x->shape.size(), dim.size()) << "The input rank should be greater than or equal to the number of reduce axes.";
+    int new_rank = x->shape.size() == dim.size() ? 1 : x->shape.size() - dim.size();
     std::vector<int> new_shape(new_rank, 1);
     return Reshape(x, new_shape);
   }
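
The corrected branch collapses the result to rank 1 only when every axis is reduced, and otherwise drops one dimension per reduce axis. A small Python sketch of that rule follows; the helper name and example shapes are illustrative, not part of the change:

def degenerate_reduce_shape(input_rank, num_axes):
    # Sketch of the corrected new_rank computation: when every axis is reduced
    # the result becomes rank 1 (shape [1]); otherwise each reduced axis is
    # dropped. The old formula (input_rank - num_axes + 1) kept one axis too
    # many, producing the wrong output shape when numel == 1.
    assert input_rank >= num_axes, "rank must be >= number of reduce axes"
    new_rank = 1 if input_rank == num_axes else input_rank - num_axes
    return [1] * new_rank


print(degenerate_reduce_shape(5, 1))  # [1, 1, 1, 1], e.g. shape [1, 1, 1, 1, 1] reduced over axis [3]
print(degenerate_reduce_shape(1, 1))  # [1]
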
cinn/hlir/pe/ir_schedule_pe.cc (7 additions, 1 deletion)
@@ -31,6 +31,7 @@
#include "cinn/ir/ir_base.h"
#include "cinn/optim/ir_simplify.h"
#include "cinn/poly/isl_utils.h"
#include "cinn/utils/string.h"

namespace cinn {
namespace hlir {
@@ -462,7 +463,12 @@ void IRCudaScheduleBlockReduce(ir::IRSchedule &ir_sch,
     }
   }

-  if (tmp_out->shape.size() == 1) {
+  // Special case for keepdim = true in reduce stage 1: the output rank may stay greater
+  // than 1, but the loops still need to be split; otherwise data read/write conflicts occur.
+  int numel = std::accumulate(tmp_out->shape.begin(), tmp_out->shape.end(), 1, [](const int &num, const ir::Expr &e) {
+    return num * e.as_int32();
+  });
+  if (tmp_out->shape.size() == 1 || (numel == tmp_out->shape.back().as_int32())) {
     CHECK_EQ(out->shape[0], Expr(1));

     // block and root
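
In plain terms, the new condition treats tmp_out as effectively one-dimensional whenever every dimension except the last is 1, which is the shape keepdim = True leaves behind after reduce stage 1, so the same loop split is applied. A rough Python restatement of that check (the function name and sample shapes are illustrative):

import math

def needs_block_reduce_split(shape):
    # Mirrors the new C++ condition: the shape is literally rank 1, or its total
    # element count equals the last extent, i.e. every other dim is 1
    # (the keepdim = True case such as [1, 1, 1024]).
    numel = math.prod(shape)
    return len(shape) == 1 or numel == shape[-1]


print(needs_block_reduce_split([1024]))        # True
print(needs_block_reduce_split([1, 1, 1024]))  # True  (keepdim = True output)
print(needs_block_reduce_split([8, 1024]))     # False
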
python/tests/ops/test_reduce_op_new.py (189 additions, 0 deletions)
@@ -0,0 +1,189 @@
# Copyright (c) 2023 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
import paddle
import cinn
from cinn.frontend import *
from cinn.common import *


@OpTestTool.skip_if(not is_compiled_with_cuda(),
                    "x86 test will be skipped due to timeout.")
class TestReduceOp(OpTest):
    def setUp(self):
        print(f"\nRunning {self.__class__.__name__}: {self.case}")
        self.prepare_inputs()

    def prepare_inputs(self):
        self.x_np = self.random(
            shape=self.case["shape"], dtype=self.case["dtype"])

    def build_paddle_program(self, target):
        x = paddle.to_tensor(self.x_np, stop_gradient=True)
        if self.case["op_type"] == "sum":
            out = paddle.sum(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
            if self.case["dtype"] == "int32":
                out = out.cast(self.case["dtype"])
        elif self.case["op_type"] == "prod":
            out = paddle.prod(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
        elif self.case["op_type"] == "max":
            out = paddle.max(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
        elif self.case["op_type"] == "min":
            out = paddle.min(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
        elif self.case["op_type"] == "all":
            out = paddle.all(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
        elif self.case["op_type"] == "any":
            out = paddle.any(
                x, axis=self.case["axis"], keepdim=self.case["keepdim"])
        else:
            out = paddle.assign(x)
        self.paddle_outputs = [out]

    def build_cinn_program(self, target):
        builder = NetBuilder("reduce")
        x = builder.create_input(
            self.nptype2cinntype(self.case["dtype"]), self.case["shape"], "x")
        if self.case["op_type"] == "sum":
            out = builder.reduce_sum(x, self.case["axis"],
                                     self.case["keepdim"])
        elif self.case["op_type"] == "prod":
            out = builder.reduce_prod(x, self.case["axis"],
                                      self.case["keepdim"])
        elif self.case["op_type"] == "max":
            out = builder.reduce_max(x, self.case["axis"],
                                     self.case["keepdim"])
        elif self.case["op_type"] == "min":
            out = builder.reduce_min(x, self.case["axis"],
                                     self.case["keepdim"])
        elif self.case["op_type"] == "all":
            out = builder.reduce_all(x, self.case["axis"],
                                     self.case["keepdim"])
        elif self.case["op_type"] == "any":
            out = builder.reduce_any(x, self.case["axis"],
                                     self.case["keepdim"])
        else:
            out = builder.identity(x)
        prog = builder.build()
        res = self.get_cinn_output(prog, target, [x], [self.x_np], [out])
        self.cinn_outputs = res

    def test_check_results(self):
        max_relative_error = self.case[
            "max_relative_error"] if "max_relative_error" in self.case else 1e-5
        self.check_outputs_and_grads(max_relative_error=max_relative_error)


class TestReduceAll(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestReduceOpCase"
        self.cls = TestReduceOp
        self.inputs = [
            {
                "shape": [1],
                "axis": [-1],
            },
            {
                "shape": [1024],
                "axis": [0],
            },
            {
                "shape": [512, 256],
                "axis": [1],
            },
            {
                "shape": [128, 64, 32],
                "axis": [2],
            },
            {
                "shape": [16, 8, 4, 2],
                "axis": [3],
            },
            {
                "shape": [16, 8, 4, 2, 1],
                "axis": [3],
            },
            {
                "shape": [1, 1, 1, 1, 1],
                "axis": [3],
            },
        ]
        self.dtypes = [
            # Paddle reduce not support
            # {
            #     "dtype": "int16",
            # },
            {
                "dtype": "int32",
            },
            {
                "dtype": "int64",
            },
            # Paddle reduce not support
            # {
            #     "dtype": "float16",
            # },
            {
                "dtype": "float32",
            },
            {
                "dtype": "float64",
            },
        ]
        self.attrs = [
            {
                "op_type": "sum",
                "keepdim": True
            },
            {
                "op_type": "sum",
                "keepdim": False
            },
            {
                "op_type": "prod",
                "keepdim": True
            },
            {
                "op_type": "prod",
                "keepdim": False
            },
            {
                "op_type": "max",
                "keepdim": True
            },
            {
                "op_type": "max",
                "keepdim": False
            },
            {
                "op_type": "min",
                "keepdim": True
            },
            {
                "op_type": "min",
                "keepdim": False
            },
        ]


if __name__ == "__main__":
    TestReduceAll().run()
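
For orientation, TestCaseHelper.run() is assumed to expand inputs, dtypes and attrs into their Cartesian product and execute one TestReduceOp case per combination; the real helper lives in op_test_helper and is not shown in this commit. The sketch below is a hypothetical reimplementation of that idea, included only to explain how the configuration lists above turn into test cases:

import itertools
import unittest


def run_cases(helper_cls):
    # Hypothetical sketch: build one unittest case per (input, dtype, attr)
    # combination and attach it to a dynamically created subclass, which is
    # roughly what the TestCaseHelper-based tests in this commit rely on.
    helper = helper_cls()
    helper.init_attrs()
    suite = unittest.TestSuite()
    for inp, dtype, attr in itertools.product(helper.inputs, helper.dtypes,
                                              helper.attrs):
        case = {**inp, **dtype, **attr}
        test_cls = type(helper.class_name, (helper.cls,), {"case": case})
        suite.addTest(test_cls("test_check_results"))
    unittest.TextTestRunner(verbosity=2).run(suite)
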
python/tests/ops/test_reduce_op_other.py (87 additions, 0 deletions)
@@ -0,0 +1,87 @@
# Copyright (c) 2023 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from test_reduce_op_new import TestReduceAll


class TestReduceForBool(TestReduceAll):
    def init_attrs(self):
        super().init_attrs()
        self.dtypes = [{"dtype": "bool"}]
        self.attrs = [
            {
                "op_type": "all",
                "keepdim": True
            },
            {
                "op_type": "all",
                "keepdim": False
            },
            {
                "op_type": "any",
                "keepdim": True
            },
            {
                "op_type": "any",
                "keepdim": False
            },
        ]


class TestReduceAxis(TestReduceAll):
    def init_attrs(self):
        super().init_attrs()
        self.inputs = [
            {
                "shape": [1, 512, 1],
                "axis": [1],
            },
            {
                "shape": [1, 1024, 1],
                "axis": [1],
            },
            {
                "shape": [1, 2048, 1],
                "axis": [1],
            },
            {
                "shape": [64, 32, 16, 8, 4],
                "axis": [0, 2],
            },
            {
                "shape": [64, 32, 16, 8, 4],
                "axis": [1, 2, 3],
            },
            {
                # No axis, all reduce
                "shape": [64, 32, 16, 8, 4],
                "axis": [],
            },
        ]
        self.dtypes = [{"dtype": "float32"}]
        self.attrs = [
            {
                "op_type": "sum",
                "keepdim": True,
            },
            {
                "op_type": "sum",
                "keepdim": False,
            },
        ]


if __name__ == "__main__":
    TestReduceForBool().run()
    TestReduceAxis().run()
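
The TestReduceAxis shapes such as [1, 512, 1] with keepdim = True are the configurations that exercise the scheduler change above: the stage-1 output keeps its rank but has a single non-unit dimension. A quick NumPy check of the reference output shapes these cases expect (illustrative only, not part of the commit):

import numpy as np

x = np.random.rand(1, 512, 1).astype("float32")
print(np.sum(x, axis=1, keepdims=True).shape)   # (1, 1, 1)  with keepdim=True
print(np.sum(x, axis=1, keepdims=False).shape)  # (1, 1)     with keepdim=False
print(np.sum(np.ones((64, 32, 16, 8, 4)), axis=(0, 2), keepdims=True).shape)  # (1, 32, 1, 8, 4)
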
