diff --git a/cinn/frontend/net_builder.cc b/cinn/frontend/net_builder.cc
index 4919a15b06..0d04897d1d 100644
--- a/cinn/frontend/net_builder.cc
+++ b/cinn/frontend/net_builder.cc
@@ -816,11 +816,7 @@ Variable NetBuilder::Arange(const float start, const float stop, const float step
 }
 
 Variable NetBuilder::Flip(const Variable& operand, const std::vector<int>& axes) {
-  Instruction instr("flip", {operand});
-  instr.SetAttr("axes", axes);
-  InferShape(instr);
-  AppendInstruction(instr);
-  return instr.GetOutput(0);
+  return CustomInstr("reverse", {operand}, {{"axis", utils::GetPositiveAxes(axes, operand->shape.size())}}).front();
 }
 
 Variable NetBuilder::Matmul(const Variable& x, const Variable& y, bool trans_x, bool trans_y, float alpha) {
diff --git a/cinn/frontend/net_builder.h b/cinn/frontend/net_builder.h
index 578473eb7f..b16b9a91a4 100644
--- a/cinn/frontend/net_builder.h
+++ b/cinn/frontend/net_builder.h
@@ -901,7 +901,10 @@ class NetBuilder {
                    const std::string& padding_algorithm = "EXPLICIT");
 
   /**
-   * This API flipes the Variable x along the given axis.
+   * @brief This API reverses the Variable operand along the given axes.
+   * @param operand An N-D variable.
+   * @param axes The axes along which the input is reversed.
+   * @return A reversed variable with the same data type as operand.
    */
   Variable Flip(const Variable& operand, const std::vector<int>& axes);
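With this change Flip no longer emits a dedicated "flip" instruction: it lowers to the existing "reverse" op after normalizing negative axes with utils::GetPositiveAxes. For intuition, a minimal Python sketch of what that normalization is assumed to do (get_positive_axes below is a hypothetical stand-in for the C++ helper, not the helper itself):

    def get_positive_axes(axes, rank):
        # A negative axis counts from the end: axis k maps to k + rank.
        return [a if a >= 0 else a + rank for a in axes]

    assert get_positive_axes([-1, 0], 4) == [3, 0]
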
diff --git a/cinn/frontend/net_builder_test.cc b/cinn/frontend/net_builder_test.cc
index 1fb87e6a95..e57ec7a241 100644
--- a/cinn/frontend/net_builder_test.cc
+++ b/cinn/frontend/net_builder_test.cc
@@ -984,76 +984,6 @@ TEST(net_build, program_execute_arange_int) {
   }
 }
 
-TEST(net_build, program_execute_flip) {
-  const int C = 2;
-  const int H = 2;
-  const int W = 2;
-  const std::vector<int> axes{0};
-
-  NetBuilder builder("net_builder");
-  Placeholder input = builder.CreateInput(Float(32), {C, H, W}, "Img");
-  Variable output = builder.Flip(input, axes);
-  auto program = builder.Build();
-
-#ifdef CINN_WITH_CUDA
-  Target target = common::DefaultNVGPUTarget();
-#else
-  Target target = common::DefaultHostTarget();
-#endif
-  std::unordered_set<std::string> fetch_ids;
-  auto graph = Optimize(&program, fetch_ids, target);
-
-  auto scope = BuildScope(target, graph);
-  hlir::framework::GraphCompiler gc(target, scope, graph);
-  auto runtime_program = gc.Build();
-
-  scope->Var<hlir::framework::Tensor>(std::string(input.id()));
-  scope->Var<hlir::framework::Tensor>(std::string(output->id));
-
-  auto input_tensor = scope->GetTensor(std::string(input.id()));
-  SetRandData<float>(input_tensor, target);
-  std::vector<float> input_data = GetTensorData<float>(input_tensor, target);
-
-  runtime_program->Execute();
-  auto output_tensor = scope->GetTensor(std::string(output->id));
-  const std::vector<int>& output_shape = output_tensor->shape().data();
-  EXPECT_EQ(output_tensor->type(), Float(32));
-  EXPECT_EQ(output_shape.size(), 3UL);
-  EXPECT_EQ(output_shape[0], C);
-  EXPECT_EQ(output_shape[1], H);
-  EXPECT_EQ(output_shape[2], W);
-
-  std::vector<float> output_data = GetTensorData<float>(output_tensor, target);
-  VLOG(6) << "Visualize flip input_data";
-  for (int c = 0; c < C; c++) {
-    for (int h = 0; h < H; h++) {
-      std::string line;
-      for (int w = 0; w < W; w++) {
-        int index = c * (H * W) + h * W + w;
-        line += (std::to_string(index) + ": " + std::to_string(input_data[index]) + ", ");
-      }
-      VLOG(6) << line;
-    }
-  }
-
-  VLOG(6) << "Visualize flip output_data";
-  for (int c = 0; c < C; c++) {
-    int flip_c = std::find(axes.begin(), axes.end(), 0) == axes.end() ? c : C - c - 1;
-    for (int h = 0; h < H; h++) {
-      std::string line;
-      int flip_h = std::find(axes.begin(), axes.end(), 1) == axes.end() ? h : H - h - 1;
-      for (int w = 0; w < W; w++) {
-        int flip_w = std::find(axes.begin(), axes.end(), 2) == axes.end() ? w : W - w - 1;
-        int flip_index = flip_c * H * W + flip_h * W + flip_w;
-        int index = c * (H * W) + h * W + w;
-        line += (std::to_string(index) + ": " + std::to_string(output_data[index]) + ", ");
-        EXPECT_EQ(input_data[index], output_data[flip_index]);
-      }
-      VLOG(6) << line;
-    }
-  }
-}
-
 TEST(net_build, program_argmax_case1) {
   const int N = 4;
   const int IN_C = 3;
diff --git a/cinn/hlir/op/contrib/CMakeLists.txt b/cinn/hlir/op/contrib/CMakeLists.txt
index 48565a4edb..d8237fb503 100644
--- a/cinn/hlir/op/contrib/CMakeLists.txt
+++ b/cinn/hlir/op/contrib/CMakeLists.txt
@@ -2,7 +2,6 @@ core_gather_headers()
 
 gather_srcs(cinnapi_src SRCS
     gather_nd.cc
-    flip.cc
     sort.cc
     argmin.cc
    argmax.cc
@@ -24,7 +23,6 @@ cc_test(test_gather_nd SRCS gather_nd_test.cc DEPS cinncore)
 cc_test(test_sort SRCS sort_test.cc DEPS cinncore)
 cc_test(test_argmin SRCS argmin_test.cc DEPS cinncore)
 cc_test(test_argmax SRCS argmax_test.cc DEPS cinncore)
-cc_test(test_flip SRCS flip_test.cc DEPS cinncore)
 cc_test(test_repeat SRCS repeat_test.cc DEPS cinncore)
 cc_test(test_one_hot SRCS one_hot_test.cc DEPS cinncore)
 cc_test(test_lookup_table SRCS lookup_table_test.cc DEPS cinncore)
diff --git a/cinn/hlir/op/contrib/flip.cc b/cinn/hlir/op/contrib/flip.cc
deleted file mode 100644
index 8157266ff5..0000000000
--- a/cinn/hlir/op/contrib/flip.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2022 CINN Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -#include "cinn/hlir/op/contrib/flip.h" - -#include - -#include -#include -#include -#include - -#include "cinn/common/cas.h" -#include "cinn/common/common.h" -#include "cinn/common/context.h" -#include "cinn/common/macros.h" -#include "cinn/hlir/framework/node.h" -#include "cinn/hlir/framework/op.h" -#include "cinn/hlir/framework/op_strategy.h" -#include "cinn/hlir/op/op_util.h" -#include "cinn/hlir/pe/elementwise.h" -#include "cinn/hlir/pe/ir_schedule_pe.h" -#include "cinn/hlir/pe/transform.h" -#include "cinn/ir/ir.h" -#include "cinn/ir/ir_base.h" -#include "cinn/ir/ir_schedule.h" -#include "cinn/ir/tensor.h" -#include "cinn/lang/builtin.h" -#include "cinn/lang/compute.h" - -DECLARE_bool(cinn_ir_schedule); - -namespace cinn { -namespace hlir { -namespace op { - -using common::CINNValue; -using common::CINNValuePack; -using framework::shape_t; - -ir::Tensor Flip(const ir::Tensor &input, const std::vector &axes, const std::string &name) { - return cinn::hlir::pe::Reverse(input, axes, name); -} - -std::shared_ptr StrategyForFlip(const framework::NodeAttr &attrs, - const std::vector &inputs, - const std::vector &out_type, - const std::vector> &output_shapes, - const Target &target) { - CHECK(attrs.attr_store.count("axes")) << "find no attr of axes"; - std::vector axes = absl::get>(attrs.attr_store.at("axes")); - std::string op_name("flip"); - - framework::CINNCompute flip_compute([=](lang::Args args, lang::RetValue *ret) { - CHECK(!args.empty()) << "The input argument of " << op_name << " compute is empty! Please check."; - CINNValuePack pack_args = args[0]; - CHECK_GE(pack_args.size(), 1U) << "1 input tensor for " << op_name << " compute"; - std::string tensor_name = UniqName(op_name + "_Out"); - if (FLAGS_cinn_ir_schedule) { - CHECK_EQ(pack_args.size(), 2U); - tensor_name = pack_args[1].operator std::string(); - } - Expr A_expr = pack_args[0]; - CHECK(A_expr.as_tensor()); - ir::Tensor A = A_expr.as_tensor_ref(); - auto out = Flip(A, axes, tensor_name); - auto stages = CreateStages({A}); - std::vector res; - stages->InsertLazily(out); - res.push_back(CINNValue(out)); - res.push_back(CINNValue(stages)); - *ret = CINNValuePack{res}; - }); - - auto strategy = std::make_shared(); - strategy->AddImpl(flip_compute, GetInjectiveScheduleFunc(output_shapes, target), "strategy.flip.x86", 1); - return strategy; -} - -std::vector InferShapeForFlip(const std::vector &inputs_shape, const framework::AttrMapType &attrs) { - CHECK_EQ(inputs_shape.size(), 1U) << "The input's shape size should be 1! Please check again."; - std::vector res{inputs_shape[0]}; - return res; -} - -std::vector InferDtypeForFlip(const std::vector &inputs_type, const framework::AttrMapType &attrs) { - CHECK(!inputs_type.empty()) << "The input's type size is 0! Please check again."; - std::vector res{inputs_type[0]}; - return res; -} - -} // namespace op -} // namespace hlir -} // namespace cinn - -CINN_REGISTER_HELPER(flip_ops) { - CINN_REGISTER_OP(flip) - .describe("Flip.") - .set_num_inputs(1) - .set_num_outputs(1) - .set_attr("CINNStrategy", cinn::hlir::op::StrategyForFlip) - .set_attr("infershape", MakeOpFunction(cinn::hlir::op::InferShapeForFlip)) - .set_attr("inferdtype", MakeOpFunction(cinn::hlir::op::InferDtypeForFlip)) - .set_support_level(4); - - return true; -} diff --git a/cinn/hlir/op/contrib/flip.h b/cinn/hlir/op/contrib/flip.h deleted file mode 100644 index 8c52cf0449..0000000000 --- a/cinn/hlir/op/contrib/flip.h +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2022 CINN Authors. All Rights Reserved. 
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "cinn/ir/ir.h"
-#include "cinn/ir/ir_base.h"
-#include "cinn/ir/tensor.h"
-
-namespace cinn {
-namespace hlir {
-namespace op {
-
-ir::Tensor Flip(const ir::Tensor& input, const std::vector<int>& axis, const std::string& name);
-
-}  // namespace op
-}  // namespace hlir
-}  // namespace cinn
diff --git a/cinn/hlir/op/contrib/flip_test.cc b/cinn/hlir/op/contrib/flip_test.cc
deleted file mode 100644
index e86c9e29ed..0000000000
--- a/cinn/hlir/op/contrib/flip_test.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2022 CINN Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "cinn/hlir/op/contrib/flip.h"
-
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include <string>
-#include <vector>
-
-#include "cinn/backends/codegen_c.h"
-#include "cinn/backends/codegen_c_x86.h"
-#include "cinn/backends/codegen_cuda_dev.h"
-#include "cinn/common/context.h"
-#include "cinn/lang/lower.h"
-#include "cinn/lang/placeholder.h"
-#include "cinn/poly/stage.h"
-
-namespace cinn {
-namespace hlir {
-namespace op {
-
-TEST(GenerateCode_Cpu, Flip) {
-  common::Context::Global().ResetNameId();
-
-  common::Target target = common::DefaultHostTarget();
-
-  ir::Expr n(4);
-  ir::Expr h(28);
-
-  lang::Placeholder<float> in("in", {n, h});
-  ir::Tensor res = Flip(in, {1}, "test_flip");
-
-  poly::StageMap stages = poly::CreateStages({res});
-  std::vector<ir::LoweredFunc> funcs =
-      lang::LowerVec("TestGenerateCodeCpu_Flip", stages, {res}, {}, {}, nullptr, target, true);
-
-  VLOG(6) << "Expr before CPU codegen:";
-  VLOG(6) << funcs[0]->body;
-
-  ir::Module::Builder builder("Flip_Module", target);
-  for (auto& f : funcs) {
-    builder.AddFunction(f);
-  }
-
-  backends::CodeGenCX86 codegen(target, backends::CodeGenCX86::Feature::AVX512);
-  codegen.SetInlineBuiltinCodes(false);
-  std::string code = codegen.Compile(builder.Build(), backends::CodeGenC::OutputKind::CImpl);
-  VLOG(6) << "Cpu Codegen result:";
-  VLOG(6) << code << std::endl;
-}
-
-}  // namespace op
-}  // namespace hlir
-}  // namespace cinn
diff --git a/cinn/hlir/op/transform.cc b/cinn/hlir/op/transform.cc
index 31f3fdc3ee..24951be324 100644
--- a/cinn/hlir/op/transform.cc
+++ b/cinn/hlir/op/transform.cc
@@ -831,7 +831,6 @@ std::shared_ptr<OpStrategy> StrategyForReverse(const framework::NodeAttr &attrs,
   std::vector<int> axis;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
     axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
-    CHECK(!axis.empty()) << "axis is empty! Please check setting.\n";
     for (auto &e : axis) {
       if (e >= static_cast<int>(output_shapes[0].size()) || e < -1 * static_cast<int>(output_shapes[0].size())) {
         LOG(FATAL) << "axis is not in [0, n_dim), Please check.";
@@ -840,8 +839,6 @@
         e += output_shapes[0].size();
       }
     }
-  } else {
-    LOG(FATAL) << "axis is not be set! Please check.";
   }
 
   framework::CINNCompute reverse_compute([=](lang::Args args, lang::RetValue *ret) {
@@ -875,7 +872,6 @@ std::vector<std::vector<int>> InferShapeForReverse(const std::vector<std::vector<int>> &inputs_shape,
   std::vector<std::vector<int>> res{inputs_shape[0]};
   if (attrs.find("axis") != attrs.end()) {
     auto axis = absl::get<std::vector<int>>(attrs.at("axis"));
-    CHECK(!axis.empty()) << "axis is empty! Please check setting.\n";
     for (auto &e : axis) {
       if (e >= static_cast<int>(inputs_shape[0].size()) || e < -1 * static_cast<int>(inputs_shape[0].size())) {
         LOG(FATAL) << "axis is not in [-n_dim, n_dim), Please check.";
@@ -884,8 +880,6 @@ std::vector<std::vector<int>> InferShapeForReverse(const std::vector<std::vector<int>> &inputs_shape,
       e += inputs_shape[0].size();
     }
   }
-  } else {
-    LOG(FATAL) << "axis is not be set! Please check.";
   }
   return res;
 }
@@ ... @@ std::vector<std::vector<std::string>> InferLayoutForReverse(const std::vector<framework::shape_t> &input_shapes,
     auto axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
-    CHECK(!axis.empty()) << "axis is empty! Please check setting.\n";
     for (auto &e : axis) {
       if (e >= static_cast<int>(input_shapes[0].size()) || e < -1 * static_cast<int>(input_shapes[0].size())) {
         LOG(FATAL) << "axis is not in [-n_dim, n_dim), Please check.";
       }
     }
-  } else {
-    LOG(FATAL) << "axis is not be set! Please check.";
   }
   CHECK_EQ(input_layouts.size(), 1U) << "The input's layout size is not 1! Please check again.";
   return {input_layouts, input_layouts};
diff --git a/cinn/hlir/op/use_ops.h b/cinn/hlir/op/use_ops.h
index e29941efd7..9589bb96b0 100644
--- a/cinn/hlir/op/use_ops.h
+++ b/cinn/hlir/op/use_ops.h
@@ -28,7 +28,6 @@ CINN_USE_REGISTER(argmin_ops)
 CINN_USE_REGISTER(argmax_ops)
 CINN_USE_REGISTER(reduce_ops)
 CINN_USE_REGISTER(custom_call_op)
-CINN_USE_REGISTER(flip_ops)
 CINN_USE_REGISTER(repeat_ops)
 CINN_USE_REGISTER(one_hot_ops)
 CINN_USE_REGISTER(lookup_table_ops)
diff --git a/cinn/pybind/frontend.cc b/cinn/pybind/frontend.cc
index 0dc037d7b7..73de15adab 100644
--- a/cinn/pybind/frontend.cc
+++ b/cinn/pybind/frontend.cc
@@ -701,6 +701,8 @@ void BindFrontend(pybind11::module *m) {
            py::arg("max") = 0,
            py::arg("seed") = 0,
            py::arg("dtype") = "int64")
+      .def("repeat", &NetBuilder::Repeat, py::arg("x"), py::arg("repeats"), py::arg("axis"))
+      .def("flip", &NetBuilder::Flip, py::arg("x"), py::arg("axis"))
       .def("cholesky", &NetBuilder::Cholesky, py::arg("x"), py::arg("upper") = false)
       .def("triangular_solve",
            &NetBuilder::TriangularSolve,
diff --git a/python/tests/ops/op_test_helper.py b/python/tests/ops/op_test_helper.py
index eb8c99889a..d5fd1935c8 100644
--- a/python/tests/ops/op_test_helper.py
+++ b/python/tests/ops/op_test_helper.py
@@ -35,6 +35,9 @@ class TestCaseHelper():
     Helper class for constructing test cases.
     """
 
+    def __init__(self):
+        self.custom_attrs_list = []
+
     def init_attrs(self):
         """
         Initialize attributes for op
@@ -51,6 +54,12 @@ def _flatten_tuple(self, cur_tuple):
                 new_dict.append((k, v))
         return dict(new_dict)
 
+    def _register_custom_attrs(self, custom_attrs):
+        """
+        Register custom attributes.
+        """
+        self.custom_attrs_list.append(custom_attrs)
+
     def _init_cases(self):
         """
        Generate all test cases
        """
@@ -59,7 +68,9 @@ def _init_cases(self):
         assert isinstance(self.dtypes, list)
         assert isinstance(self.attrs, list)
         self.all_cases = []
-        all_lists = [self.inputs, self.dtypes, self.attrs]
+        all_lists = [
+            self.inputs, self.dtypes, self.attrs, *self.custom_attrs_list
+        ]
         filtered_lists = filter(lambda x: len(x) > 0, all_lists)
         for case in itertools.product(*filtered_lists):
             self.all_cases.append(self._flatten_tuple(case))
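The helper now builds one test case per element of the cartesian product of inputs, dtypes, attrs, and any registered custom attribute lists. A rough standalone sketch of that expansion, assuming hypothetical case lists and independent of the real OpTest machinery:

    import itertools

    # Every non-empty list contributes one dimension to the product;
    # each resulting tuple of dicts is flattened into a single case dict.
    inputs = [{"shape": [8, 5]}]
    dtypes = [{"dtype": "float32"}, {"dtype": "float64"}]
    custom = [{"net_builder_api": "reverse"}, {"net_builder_api": "flip"}]

    all_lists = [inputs, dtypes, custom]
    filtered = filter(lambda x: len(x) > 0, all_lists)
    cases = [dict(kv for d in case for kv in d.items())
             for case in itertools.product(*filtered)]
    print(len(cases))  # 1 * 2 * 2 = 4 combined cases
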
""" + def __init__(self): + self.custom_attrs_list = [] + def init_attrs(self): """ Initialize attributes for op @@ -51,6 +54,12 @@ def _flatten_tuple(self, cur_tuple): new_dict.append((k, v)) return dict(new_dict) + def _register_custom_attrs(self, custom_attrs): + """ + register custom attribute + """ + self.custom_attrs_list.append(custom_attrs) + def _init_cases(self): """ Generate all test cases @@ -59,7 +68,9 @@ def _init_cases(self): assert isinstance(self.dtypes, list) assert isinstance(self.attrs, list) self.all_cases = [] - all_lists = [self.inputs, self.dtypes, self.attrs] + all_lists = [ + self.inputs, self.dtypes, self.attrs, *self.custom_attrs_list + ] filtered_lists = filter(lambda x: len(x) > 0, all_lists) for case in itertools.product(*filtered_lists): self.all_cases.append(self._flatten_tuple(case)) diff --git a/python/tests/ops/test_add_op.py b/python/tests/ops/test_add_op.py index 03136c35a8..80ea1f0863 100644 --- a/python/tests/ops/test_add_op.py +++ b/python/tests/ops/test_add_op.py @@ -1,6 +1,4 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2021 CINN Authors. All Rights Reserved. +# Copyright (c) 2023 CINN Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,32 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -import unittest import numpy as np -from op_test import OpTest, OpTestTool import paddle -import cinn -from cinn.frontend import * from cinn.common import * +from cinn.frontend import * +from op_test import OpTest, OpTestTool +from op_test_helper import TestCaseHelper @OpTestTool.skip_if(not is_compiled_with_cuda(), "x86 test will be skipped due to timeout.") class TestElementwiseAddOp(OpTest): def setUp(self): - self.init_case() + print(f"\nRunning {self.__class__.__name__}: {self.case}") + self.prepare_inputs() - def init_case(self): - self.inputs = { - "x": np.random.random([32, 64]).astype("float32"), - "y": np.random.random([32, 64]).astype("float32"), - "dout": np.random.random((32, 64)).astype("float32") - } - self.axis = -1 + def prepare_inputs(self): + self.x_np = self.random( + shape=self.case["x_shape"], + dtype=self.case["x_dtype"], + low=-10, + high=10) + self.y_np = self.random( + shape=self.case["y_shape"], + dtype=self.case["y_dtype"], + low=-10, + high=10) + self.dout_np = self.random( + self.case["dout_shape"], dtype=self.case["dout_dtype"]) def build_paddle_program(self, target): - x = paddle.to_tensor(self.inputs["x"], stop_gradient=False) - y = paddle.to_tensor(self.inputs["y"], stop_gradient=False) + x = paddle.to_tensor(self.x_np, stop_gradient=False) + y = paddle.to_tensor(self.y_np, stop_gradient=False) def get_unsqueeze_axis(x_rank, y_rank, axis): self.assertTrue( @@ -48,83 +52,206 @@ def get_unsqueeze_axis(x_rank, y_rank, axis): axis = axis if axis >= 0 else x_rank - y_rank unsqueeze_axis = np.arange(0, axis).tolist() + np.arange( axis + y_rank, x_rank).tolist() - return unsqueeze_axis unsqueeze_axis = get_unsqueeze_axis( - len(self.inputs["x"].shape), len(self.inputs["y"].shape), - self.axis) + len(x.shape), len(y.shape), self.case["axis"]) y_t = paddle.unsqueeze( y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y out = paddle.add(x, y_t) self.paddle_outputs = [out] self.paddle_grads = self.get_paddle_grads([out], [x, y], - [self.inputs["dout"]]) + [self.dout_np]) def build_cinn_program(self, target): builder = NetBuilder("add") - x = 
-        x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
-        y = builder.create_input(Float(32), self.inputs["y"].shape, "y")
-        out = builder.add(x, y, axis=self.axis)
+        x = builder.create_input(
+            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
+            "x")
+        y = builder.create_input(
+            self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"],
+            "y")
+        out = builder.add(x, y, axis=self.case["axis"])
 
         dout = builder.create_input(
-            Float(32), self.inputs["dout"].shape, "dout")
+            self.nptype2cinntype(self.case["dout_dtype"]),
+            self.case["dout_shape"], "dout")
         x_grad, y_grad = builder.elementwise_add_grad(
-            dout, x, y, axis=self.axis)
+            dout, x, y, axis=self.case["axis"])
 
         prog = builder.build()
-        res = self.get_cinn_output(
-            prog, target, [x, y, dout],
-            [self.inputs["x"], self.inputs["y"], self.inputs["dout"]],
-            [out, x_grad, y_grad])
+        res = self.get_cinn_output(prog, target, [x, y, dout],
+                                   [self.x_np, self.y_np, self.dout_np],
+                                   [out, x_grad, y_grad])
 
         self.cinn_outputs = [res[0]]
         self.cinn_grads = [res[1], res[2]]
 
     def test_check_results(self):
-        self.check_outputs_and_grads()
-
-
-class TestAddCase1(TestElementwiseAddOp):
-    def init_case(self):
-        self.inputs = {
-            "x": np.random.random([8, 16, 32, 32]).astype("float32"),
-            "y": np.random.random([32, 32]).astype("float32"),
-            "dout": np.random.random((8, 16, 32, 32)).astype("float32")
-        }
-        self.axis = -1
-
-
-class TestAddCase2(TestElementwiseAddOp):
-    def init_case(self):
-        self.inputs = {
-            "x": np.random.random([8, 1, 32, 32]).astype("float32"),
-            "y": np.random.random([16, 32]).astype("float32"),
-            "dout": np.random.random((8, 16, 32, 32)).astype("float32")
-        }
-        self.axis = 1
+        max_relative_error = self.case[
+            "max_relative_error"] if "max_relative_error" in self.case else 1e-5
+        self.check_outputs_and_grads(max_relative_error=max_relative_error)
 
 
-class TestAddCase3(TestElementwiseAddOp):
-    def init_case(self):
-        self.inputs = {
-            "x": np.random.random([4, 16, 8, 32]).astype("float32"),
-            "y": np.random.random([4, 16]).astype("float32"),
-            "dout": np.random.random((4, 16, 8, 32)).astype("float32")
-        }
-        self.axis = 0
+class TestAddAll(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestElementwiseAddOpCase"
+        self.cls = TestElementwiseAddOp
+        self.inputs = [
+            {
+                "x_shape": [1],
+                "y_shape": [1],
+                "dout_shape": [1],
+                "axis": 0,
+            },
+            {
+                "x_shape": [1024],
+                "y_shape": [1024],
+                "dout_shape": [1024],
+                "axis": -1,
+            },
+            {
+                "x_shape": [512, 256],
+                "y_shape": [512, 256],
+                "dout_shape": [512, 256],
+                "axis": 0,
+            },
+            {
+                "x_shape": [128, 64, 32],
+                "y_shape": [128, 64, 32],
+                "dout_shape": [128, 64, 32],
+                "axis": -1,
+            },
+            {
+                "x_shape": [16, 8, 4, 2],
+                "y_shape": [16, 8, 4, 2],
+                "dout_shape": [16, 8, 4, 2],
+                "axis": 0,
+            },
+            {
+                "x_shape": [16, 8, 4, 2, 1],
+                "y_shape": [16, 8, 4, 2, 1],
+                "dout_shape": [16, 8, 4, 2, 1],
+                "axis": -1,
+            },
+        ]
+        self.dtypes = [
+            # TODO: paddle 2.3.1 does not support int16; remove after the CI paddle is updated
+            # {
+            #     "x_dtype": "int16",
+            #     "y_dtype": "int16",
+            #     "dout_dtype": "int16",
+            # },
+            {
+                "x_dtype": "int32",
+                "y_dtype": "int32",
+                "dout_dtype": "int32",
+            },
+            {
+                "x_dtype": "int64",
+                "y_dtype": "int64",
+                "dout_dtype": "int64",
+            },
+            {
+                "x_dtype": "float16",
+                "y_dtype": "float16",
+                "dout_dtype": "float16",
+                "max_relative_error": 1e-3,
+            },
+            {
+                "x_dtype": "float32",
+                "y_dtype": "float32",
+                "dout_dtype": "float32",
+            },
+            {
+                "x_dtype": "float64",
+                "y_dtype": "float64",
+                "dout_dtype": "float64",
+            },
+        ]
+        self.attrs = []
-class TestAddCase4(TestElementwiseAddOp):
-    def init_case(self):
-        self.inputs = {
-            "x": np.random.random([4, 16, 8, 32]).astype("float32"),
-            "y": np.random.random([1]).astype("float32"),
-            "dout": np.random.random((4, 16, 8, 32)).astype("float32")
-        }
-        self.axis = -1
+class TestAddAllWithBroadcast(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestElementwiseAddOpCase"
+        self.cls = TestElementwiseAddOp
+        self.inputs = [
+            {
+                "x_shape": [1],
+                "y_shape": [1],
+                "dout_shape": [1],
+                "axis": 0,
+            },
+            {
+                "x_shape": [1024],
+                "y_shape": [1],
+                "dout_shape": [1024],
+                "axis": -1,
+            },
+            {
+                "x_shape": [512, 256],
+                "y_shape": [1, 1],
+                "dout_shape": [512, 256],
+                "axis": 0,
+            },
+            {
+                "x_shape": [128, 64, 32],
+                "y_shape": [1, 1, 1],
+                "dout_shape": [128, 64, 32],
+                "axis": -1,
+            },
+            {
+                "x_shape": [16, 8, 4, 2],
+                "y_shape": [1, 1, 1, 1],
+                "dout_shape": [16, 8, 4, 2],
+                "axis": 0,
+            },
+            {
+                "x_shape": [16, 8, 4, 2, 1],
+                "y_shape": [1, 1, 1, 1, 1],
+                "dout_shape": [16, 8, 4, 2, 1],
+                "axis": -1,
+            },
+        ]
+        self.dtypes = [
+            # TODO: Reduce does not support int16
+            # {
+            #     "x_dtype": "int16",
+            #     "y_dtype": "int16",
+            #     "dout_dtype": "int16",
+            # },
+            {
+                "x_dtype": "int32",
+                "y_dtype": "int32",
+                "dout_dtype": "int32",
+            },
+            {
+                "x_dtype": "int64",
+                "y_dtype": "int64",
+                "dout_dtype": "int64",
+            },
+            {
+                "x_dtype": "float16",
+                "y_dtype": "float16",
+                "dout_dtype": "float16",
+                "max_relative_error": 1e-3,
+            },
+            {
+                "x_dtype": "float32",
+                "y_dtype": "float32",
+                "dout_dtype": "float32",
+            },
+            {
+                "x_dtype": "float64",
+                "y_dtype": "float64",
+                "dout_dtype": "float64",
+            },
+        ]
+        self.attrs = []
 
 
 if __name__ == "__main__":
-    unittest.main()
+    TestAddAll().run()
+    TestAddAllWithBroadcast().run()
diff --git a/python/tests/ops/test_add_op_new.py b/python/tests/ops/test_add_op_new.py
deleted file mode 100644
index 75a602d783..0000000000
--- a/python/tests/ops/test_add_op_new.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright (c) 2023 CINN Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-import numpy as np
-from op_test import OpTest, OpTestTool
-from op_test_helper import TestCaseHelper
-import paddle
-import cinn
-from cinn.frontend import *
-from cinn.common import *
-
-
-@OpTestTool.skip_if(not is_compiled_with_cuda(),
-                    "x86 test will be skipped due to timeout.")
-class TestElementwiseAddOp(OpTest):
-    def setUp(self):
-        print(f"\nRunning {self.__class__.__name__}: {self.case}")
-        self.prepare_inputs()
-
-    def prepare_inputs(self):
-        self.x_np = self.random(
-            shape=self.case["x_shape"],
-            dtype=self.case["x_dtype"],
-            low=-10,
-            high=10)
-        self.y_np = self.random(
-            shape=self.case["y_shape"],
-            dtype=self.case["y_dtype"],
-            low=-10,
-            high=10)
-        self.dout_np = self.random(
-            self.case["dout_shape"], dtype=self.case["dout_dtype"])
-
-    def build_paddle_program(self, target):
-        x = paddle.to_tensor(self.x_np, stop_gradient=False)
-        y = paddle.to_tensor(self.y_np, stop_gradient=False)
-
-        def get_unsqueeze_axis(x_rank, y_rank, axis):
-            self.assertTrue(
-                x_rank >= y_rank,
-                "The rank of x should be greater or equal to that of y.")
-            axis = axis if axis >= 0 else x_rank - y_rank
-            unsqueeze_axis = np.arange(0, axis).tolist() + np.arange(
-                axis + y_rank, x_rank).tolist()
-            return unsqueeze_axis
-
-        unsqueeze_axis = get_unsqueeze_axis(
-            len(x.shape), len(y.shape), self.case["axis"])
-        y_t = paddle.unsqueeze(
-            y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y
-        out = paddle.add(x, y_t)
-
-        self.paddle_outputs = [out]
-        self.paddle_grads = self.get_paddle_grads([out], [x, y],
-                                                  [self.dout_np])
-
-    def build_cinn_program(self, target):
-        builder = NetBuilder("add")
-        x = builder.create_input(
-            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
-            "x")
-        y = builder.create_input(
-            self.nptype2cinntype(self.case["y_dtype"]), self.case["y_shape"],
-            "y")
-        out = builder.add(x, y, axis=self.case["axis"])
-
-        dout = builder.create_input(
-            self.nptype2cinntype(self.case["dout_dtype"]),
-            self.case["dout_shape"], "dout")
-        x_grad, y_grad = builder.elementwise_add_grad(
-            dout, x, y, axis=self.case["axis"])
-
-        prog = builder.build()
-        res = self.get_cinn_output(prog, target, [x, y, dout],
-                                   [self.x_np, self.y_np, self.dout_np],
-                                   [out, x_grad, y_grad])
-
-        self.cinn_outputs = [res[0]]
-        self.cinn_grads = [res[1], res[2]]
-
-    def test_check_results(self):
-        max_relative_error = self.case[
-            "max_relative_error"] if "max_relative_error" in self.case else 1e-5
-        self.check_outputs_and_grads(max_relative_error=max_relative_error)
-
-
-class TestAddAll(TestCaseHelper):
-    def init_attrs(self):
-        self.class_name = "TestElementwiseAddOpCase"
-        self.cls = TestElementwiseAddOp
-        self.inputs = [
-            {
-                "x_shape": [1],
-                "y_shape": [1],
-                "dout_shape": [1],
-                "axis": 0,
-            },
-            {
-                "x_shape": [1024],
-                "y_shape": [1024],
-                "dout_shape": [1024],
-                "axis": -1,
-            },
-            {
-                "x_shape": [512, 256],
-                "y_shape": [512, 256],
-                "dout_shape": [512, 256],
-                "axis": 0,
-            },
-            {
-                "x_shape": [128, 64, 32],
-                "y_shape": [128, 64, 32],
-                "dout_shape": [128, 64, 32],
-                "axis": -1,
-            },
-            {
-                "x_shape": [16, 8, 4, 2],
-                "y_shape": [16, 8, 4, 2],
-                "dout_shape": [16, 8, 4, 2],
-                "axis": 0,
-            },
-            {
-                "x_shape": [16, 8, 4, 2, 1],
-                "y_shape": [16, 8, 4, 2, 1],
-                "dout_shape": [16, 8, 4, 2, 1],
-                "axis": -1,
-            },
-        ]
-        self.dtypes = [
-            # TODO: paddle 2.3.1 unsupport int16 now, remove after ci paddle updated
-            # {
-            #     "x_dtype": "int16",
-            #     "y_dtype": "int16",
-            #     "dout_dtype": "int16",
-            # },
-            {
-                "x_dtype": "int32",
-                "y_dtype": "int32",
-                "dout_dtype": "int32",
-            },
- { - "x_dtype": "int64", - "y_dtype": "int64", - "dout_dtype": "int64", - }, - { - "x_dtype": "float16", - "y_dtype": "float16", - "dout_dtype": "float16", - "max_relative_error": 1e-3, - }, - { - "x_dtype": "float32", - "y_dtype": "float32", - "dout_dtype": "float32", - }, - { - "x_dtype": "float64", - "y_dtype": "float64", - "dout_dtype": "float64", - }, - { - "x_dtype": "bfloat16", - "y_dtype": "bfloat16", - "dout_dtype": "bfloat16", - "max_relative_error": 1e-2, - }, - ] - self.attrs = [] - - -class TestAddAllWithBroadcast(TestCaseHelper): - def init_attrs(self): - self.class_name = "TestElementwiseAddOpCase" - self.cls = TestElementwiseAddOp - self.inputs = [ - { - "x_shape": [1], - "y_shape": [1], - "dout_shape": [1], - "axis": 0, - }, - { - "x_shape": [1024], - "y_shape": [1], - "dout_shape": [1024], - "axis": -1, - }, - { - "x_shape": [512, 256], - "y_shape": [1, 1], - "dout_shape": [512, 256], - "axis": 0, - }, - { - "x_shape": [128, 64, 32], - "y_shape": [1, 1, 1], - "dout_shape": [128, 64, 32], - "axis": -1, - }, - { - "x_shape": [16, 8, 4, 2], - "y_shape": [1, 1, 1, 1], - "dout_shape": [16, 8, 4, 2], - "axis": 0, - }, - { - "x_shape": [16, 8, 4, 2, 1], - "y_shape": [1, 1, 1, 1, 1], - "dout_shape": [16, 8, 4, 2, 1], - "axis": -1, - }, - ] - self.dtypes = [ - # Todo: Reduce does in support int16 - # { - # "x_dtype": "int16", - # "y_dtype": "int16", - # "dout_dtype": "int16", - # }, - { - "x_dtype": "int32", - "y_dtype": "int32", - "dout_dtype": "int32", - }, - { - "x_dtype": "int64", - "y_dtype": "int64", - "dout_dtype": "int64", - }, - { - "x_dtype": "float16", - "y_dtype": "float16", - "dout_dtype": "float16", - "max_relative_error": 1e-3, - }, - { - "x_dtype": "float32", - "y_dtype": "float32", - "dout_dtype": "float32", - }, - { - "x_dtype": "float64", - "y_dtype": "float64", - "dout_dtype": "float64", - }, - { - "x_dtype": "bfloat16", - "y_dtype": "bfloat16", - "dout_dtype": "bfloat16", - "max_relative_error": 1e-2, - }, - ] - self.attrs = [] - - -if __name__ == "__main__": - TestAddAll().run() - TestAddAllWithBroadcast().run() diff --git a/python/tests/ops/test_arange_op.py b/python/tests/ops/test_arange_op.py new file mode 100644 index 0000000000..2402400bfc --- /dev/null +++ b/python/tests/ops/test_arange_op.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2023 CINN Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import paddle
+from cinn.frontend import *
+from cinn.common import *
+from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
+
+
+@OpTestTool.skip_if(not is_compiled_with_cuda(),
+                    "x86 test will be skipped due to timeout.")
+class TestArangeOp(OpTest):
+    def setUp(self):
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.inputs = {}
+        self.prepare_inputs()
+
+    def prepare_inputs(self):
+        self.inputs = {
+            "start": self.case["start"],
+            "end": self.case["end"],
+            "step": self.case["step"],
+            "dtype": self.case["dtype"]
+        }
+
+    def build_paddle_program(self, target):
+        out = paddle.arange(self.inputs["start"], self.inputs["end"],
+                            self.inputs["step"], self.inputs["dtype"])
+        self.paddle_outputs = [out]
+
+    def build_cinn_program(self, target):
+        builder = NetBuilder("arange")
+        out = builder.arange(self.inputs["start"], self.inputs["end"],
+                             self.inputs["step"], self.inputs["dtype"])
+
+        prog = builder.build()
+        res = self.get_cinn_output(prog, target, [], [], [out])
+
+        self.cinn_outputs = res
+
+    def test_check_results(self):
+        self.check_outputs_and_grads(all_equal=True)
+
+
+class TestArangeOpShapeAndAttr(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestArangeOpShapeAndAttr"
+        self.cls = TestArangeOp
+        self.inputs = [
+            # basic shape test
+            {
+                "start": 0,
+                "end": 10,
+                "step": 1,
+            },
+            {
+                "start": 0,
+                "end": 1024,
+                "step": 16,
+            },
+            {
+                "start": 512,
+                "end": 2600,
+                "step": 512,
+            },
+            {
+                "start": 0,
+                "end": 65536,
+                "step": 1024,
+            },
+            {
+                "start": 0,
+                "end": 131072,
+                "step": 2048,
+            },
+            {
+                "start": 0,
+                "end": 1,
+                "step": 2,
+            },
+            {
+                "start": 0,
+                "end": 1,
+                "step": 2,
+            },
+            # step test
+            {
+                "start": 1024,
+                "end": 512,
+                "step": -2,
+            },
+            {
+                "start": 2048,
+                "end": 0,
+                "step": -64,
+            },
+            # range test
+            {
+                "start": -2048,
+                "end": 2048,
+                "step": 32,
+            },
+            {
+                "start": -2048,
+                "end": -512,
+                "step": 64,
+            },
+            {
+                "start": 1024,
+                "end": 4096,
+                "step": 512,
+            },
+            {
+                "start": 1024,
+                "end": -1024,
+                "step": -128,
+            },
+            {
+                "start": -1024,
+                "end": -2048,
+                "step": -64,
+            },
+            {
+                "start": 2048,
+                "end": 512,
+                "step": -32,
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = []
+
+
+class TestArangeOpDtype(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestArangeOpDtype"
+        self.cls = TestArangeOp
+        self.inputs = [
+            {
+                "start": 5,
+                "end": 10,
+                "step": 1,
+            },
+            {
+                "start": -10,
+                "end": -100,
+                "step": -10,
+            },
+            {
+                "start": -10,
+                "end": 10,
+                "step": 1,
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "int32"
+            },
+            {
+                "dtype": "int64"
+            },
+            {
+                "dtype": "float32"
+            },
+            {
+                "dtype": "float64"
+            },
+        ]
+        self.attrs = []
+
+
+if __name__ == "__main__":
+    TestArangeOpShapeAndAttr().run()
+    TestArangeOpDtype().run()
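As a quick sanity check on these ranges, an arange-style sequence has ceil((end - start) / step) elements, so even the start=0, end=1, step=2 case above yields one value. A small illustrative helper (hypothetical, not part of the suite):

    import math

    def arange_len(start, end, step):
        # Element count of an arange-style sequence.
        return max(0, math.ceil((end - start) / step))

    assert arange_len(0, 1, 2) == 1          # single element: [0]
    assert arange_len(1024, 512, -2) == 256  # negative step counts down
    assert arange_len(-2048, 2048, 32) == 128
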
diff --git a/python/tests/ops/test_repeat_op.py b/python/tests/ops/test_repeat_op.py
new file mode 100644
index 0000000000..3745054e5c
--- /dev/null
+++ b/python/tests/ops/test_repeat_op.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2023 CINN Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numpy as np
+from cinn.frontend import *
+from cinn.common import *
+from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
+
+
+@OpTestTool.skip_if(not is_compiled_with_cuda(),
+                    "x86 test will be skipped due to timeout.")
+class TestRepeatOp(OpTest):
+    def setUp(self):
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.inputs = {}
+        self.prepare_inputs()
+
+    def prepare_inputs(self):
+        shape = self.case["shape"]
+        dtype = self.case["dtype"]
+        repeats = self.case["repeats"]
+        axis = self.case["axis"]
+        dims = len(shape)
+        axis = min(axis, dims - 1)
+        axis = max(axis, -dims)
+        self.inputs = {
+            "x": self.random(shape, dtype, -1.0, 1.0),
+            "repeats": repeats,
+            "axis": axis
+        }
+
+    def build_paddle_program(self, target):
+        x = np.repeat(self.inputs["x"], self.inputs["repeats"],
+                      self.inputs["axis"])
+        out = paddle.to_tensor(x, stop_gradient=True)
+        self.paddle_outputs = [out]
+
+    def build_cinn_program(self, target):
+        builder = NetBuilder("repeat")
+        x = builder.create_input(
+            self.nptype2cinntype(self.inputs["x"].dtype),
+            self.inputs["x"].shape, "x")
+        out = builder.repeat(x, self.inputs["repeats"], self.inputs["axis"])
+
+        prog = builder.build()
+        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
+                                   [out])
+
+        self.cinn_outputs = res
+
+    def test_check_results(self):
+        self.check_outputs_and_grads()
+
+
+class TestRepeatOpShape(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestRepeatOpShape"
+        self.cls = TestRepeatOp
+        self.inputs = [
+            {
+                "shape": [10],
+            },
+            {
+                "shape": [8, 5],
+            },
+            {
+                "shape": [10, 3, 5],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+            {
+                "shape": [80, 1, 5, 7],
+            },
+            {
+                "shape": [80, 3, 1024, 7],
+            },
+            {
+                "shape": [10, 5, 1024, 2048],
+            },
+            {
+                "shape": [1],
+            },
+            {
+                "shape": [512],
+            },
+            {
+                "shape": [1024],
+            },
+            {
+                "shape": [2048],
+            },
+            {
+                "shape": [1, 1, 1, 1],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "repeats": 2,
+                "axis": 0
+            },
+        ]
+
+
+class TestRepeatOpDtype(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestRepeatOpDtype"
+        self.cls = TestRepeatOp
+        self.inputs = [
+            {
+                "shape": [1],
+            },
+            {
+                "shape": [5],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "bool"
+            },
+            {
+                "dtype": "int8"
+            },
+            {
+                "dtype": "int32"
+            },
+            {
+                "dtype": "int64"
+            },
+            {
+                "dtype": "float16"
+            },
+            {
+                "dtype": "float32"
+            },
+            {
+                "dtype": "float64"
+            },
+        ]
+        self.attrs = [
+            {
+                "repeats": 4,
+                "axis": 0
+            },
+        ]
+
+
+class TestRepeatOpAttributeRepeats(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestRepeatOpAttributeRepeats"
+        self.cls = TestRepeatOp
+        self.inputs = [
+            {
+                "shape": [10],
+            },
+            {
+                "shape": [8, 5],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "repeats": 256,
+                "axis": 0
+            },
+            {
+                "repeats": 1024,
+                "axis": 0
+            },
+            {
+                "repeats": 2048,
+                "axis": 0
+            },
+        ]
+
+
+class TestRepeatOpAttributeAxis(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestRepeatOpAttributeAxis"
+        self.cls = TestRepeatOp
+        self.inputs = [
+            {
+                "shape": [10],
+            },
+            {
+                "shape": [8, 5],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "repeats": 128,
+                "axis": 0
+            },
+            {
+                "repeats": 128,
+                "axis": 1
+            },
+            {
+                "repeats": 128,
+                "axis": 2
+            },
+            {
+                "repeats": 128,
+                "axis": 3
+            },
+            {
+                "repeats": 128,
+                "axis": -1
+            },
+            {
+                "repeats": 128,
+                "axis": -2
+            },
+            {
+                "repeats": 128,
+                "axis": -3
+            },
+            {
+                "repeats": 128,
+                "axis": -4
+            },
+        ]
+
+
+if __name__ == "__main__":
+    TestRepeatOpShape().run()
+    TestRepeatOpDtype().run()
+    TestRepeatOpAttributeRepeats().run()
+    TestRepeatOpAttributeAxis().run()
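The paddle side of this test builds its reference output with np.repeat, which duplicates each slice along the chosen axis. For example:

    import numpy as np

    # Reference semantics the test relies on: each row repeated twice along axis 0.
    x = np.array([[1, 2], [3, 4]])
    print(np.repeat(x, 2, axis=0))
    # [[1 2]
    #  [1 2]
    #  [3 4]
    #  [3 4]]
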
diff --git a/python/tests/ops/test_reverse_op.py b/python/tests/ops/test_reverse_op.py
new file mode 100755
index 0000000000..3bde72d323
--- /dev/null
+++ b/python/tests/ops/test_reverse_op.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2023 CINN Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from cinn.common import *
+from cinn.frontend import *
+from op_test import OpTest, OpTestTool
+from op_test_helper import TestCaseHelper
+
+
+@OpTestTool.skip_if(not is_compiled_with_cuda(),
+                    "x86 test will be skipped due to timeout.")
+class TestReverseOp(OpTest):
+    def setUp(self):
+        print(f"\nRunning {self.__class__.__name__}: {self.case}")
+        self.inputs = {}
+        self.prepare_inputs()
+
+    def prepare_inputs(self):
+        dims = len(self.case["shape"])
+        axes = self.case["axes"].copy()
+        for i in range(len(axes)):
+            axes[i] = min(axes[i], dims - 1)
+            axes[i] = max(axes[i], -dims)
+        self.inputs = {
+            "x": self.random(self.case["shape"], self.case["dtype"]),
+            "axes": axes
+        }
+        self.net_builder_api = self.case["net_builder_api"]
+
+    def build_paddle_program(self, target):
+        x = paddle.to_tensor(self.inputs["x"], stop_gradient=True)
+        if self.net_builder_api == "reverse":
+            out = paddle.reverse(x, self.inputs["axes"])
+        elif self.net_builder_api == "flip":
+            out = paddle.flip(x, self.inputs["axes"])
+        else:
+            raise NotImplementedError
+        self.paddle_outputs = [out]
+
+    def build_cinn_program(self, target):
+        builder = NetBuilder("reverse")
+        x = builder.create_input(
+            self.nptype2cinntype(self.inputs["x"].dtype),
+            self.inputs["x"].shape, "x")
+        if self.net_builder_api == "reverse":
+            out = builder.reverse(x, self.inputs["axes"])
+        elif self.net_builder_api == "flip":
+            out = builder.flip(x, self.inputs["axes"])
+        else:
+            raise NotImplementedError
+
+        prog = builder.build()
+        res = self.get_cinn_output(prog, target, [x], [self.inputs["x"]],
+                                   [out])
+
+        self.cinn_outputs = res
+
+    def test_check_results(self):
+        self.check_outputs_and_grads(all_equal=True)
+
+
+class TestReverseOpShape(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestReverseOpShape"
+        self.cls = TestReverseOp
+        self.inputs = [
+            {
+                "shape": [10],
+            },
+            {
+                "shape": [8, 5],
+            },
+            {
+                "shape": [10, 3, 5],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+            {
+                "shape": [80, 1, 5, 7],
+            },
+            {
+                "shape": [80, 3, 1024, 7],
+            },
+            {
+                "shape": [10, 5, 1024, 2048],
+            },
+            {
+                "shape": [1],
+            },
+            {
+                "shape": [512],
+            },
+            {
+                "shape": [1024],
+            },
+            {
+                "shape": [2048],
+            },
+            {
+                "shape": [65536],
+            },
+            {
+                "shape": [131072],
+            },
+            {
+                "shape": [1, 1, 1, 1],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "axes": [0]
+            },
+        ]
+        net_builder_api_attrs = [
+            {
+                "net_builder_api": "reverse",
+            },
+            {
+                "net_builder_api": "flip",
+            },
+        ]
+        self._register_custom_attrs(net_builder_api_attrs)
+
+
+class TestReverseOpDtype(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestReverseOpDtype"
+        self.cls = TestReverseOp
+        self.inputs = [
+            {
+                "shape": [1],
+            },
+            {
+                "shape": [5, 10],
+            },
+            {
+                "shape": [80, 40, 5, 7],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "bool"
+            },
+            {
+                "dtype": "int32"
+            },
+            {
+                "dtype": "int64"
+            },
+            {
+                "dtype": "float16"
+            },
+            {
+                "dtype": "float32"
+            },
+            {
+                "dtype": "float64"
+            },
+        ]
+        self.attrs = [
+            {
+                "axes": [0]
+            },
+        ]
+        net_builder_api_attrs = [
+            {
+                "net_builder_api": "reverse",
+            },
+            {
+                "net_builder_api": "flip",
+            },
+        ]
+        self._register_custom_attrs(net_builder_api_attrs)
+
+
+class TestReverseOpAxis(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestReverseOpAxis"
+        self.cls = TestReverseOp
+        self.inputs = [
+            {
+                "shape": [8, 4, 2, 16],
+            },
+            {
+                "shape": [1, 1, 1, 1],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "axes": [0]
+            },
+            {
+                "axes": [1]
+            },
+            {
+                "axes": [2]
+            },
+            {
+                "axes": [3]
+            },
+            {
+                "axes": [-1]
+            },
+            {
+                "axes": [-2]
+            },
+            {
+                "axes": [-3]
+            },
+            {
+                "axes": [-4]
+            },
+        ]
+        net_builder_api_attrs = [
+            {
+                "net_builder_api": "reverse",
+            },
+            {
+                "net_builder_api": "flip",
+            },
+        ]
+        self._register_custom_attrs(net_builder_api_attrs)
+
+
+class TestReverseOpMultiAxis(TestCaseHelper):
+    def init_attrs(self):
+        self.class_name = "TestReverseOpMultiAxis"
+        self.cls = TestReverseOp
+        self.inputs = [
+            {
+                "shape": [8, 4, 2, 16],
+            },
+            {
+                "shape": [1, 1, 1, 1],
+            },
+        ]
+        self.dtypes = [
+            {
+                "dtype": "float32"
+            },
+        ]
+        self.attrs = [
+            {
+                "axes": []
+            },
+            {
+                "axes": [0]
+            },
+            {
+                "axes": [1, 2]
+            },
+            {
+                "axes": [2, -1, 3]
+            },
+            {
+                "axes": [0, -3, 3, 1]
+            },
+            {
+                "axes": [-1]
+            },
+            {
+                "axes": [-2, -1]
+            },
+            {
+                "axes": [-3, -2, 3]
+            },
+            {
+                "axes": [0, 3, -3, -2]
+            },
+        ]
+        net_builder_api_attrs = [
+            {
+                "net_builder_api": "reverse",
+            },
+            {
+                "net_builder_api": "flip",
+            },
+        ]
+        self._register_custom_attrs(net_builder_api_attrs)
+
+
+if __name__ == "__main__":
+    TestReverseOpShape().run()
+    TestReverseOpDtype().run()
+    TestReverseOpAxis().run()
+    TestReverseOpMultiAxis().run()
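Taken together, the frontend changes expose two new NetBuilder methods to Python. A minimal usage sketch, assuming a CINN build with the Python bindings available (shapes and names here are illustrative only):

    from cinn.frontend import NetBuilder
    from cinn.common import Float

    builder = NetBuilder("demo")
    x = builder.create_input(Float(32), [2, 3], "x")
    flipped = builder.flip(x, [0])      # lowers to reverse with normalized axes
    repeated = builder.repeat(x, 2, 0)  # each row duplicated twice along axis 0
    prog = builder.build()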