This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

Commit

fix ci problems
wenming2014 committed Sep 25, 2020
1 parent d625c4f commit 894ecf8
Showing 7 changed files with 9,519 additions and 14 deletions.
9,432 changes: 9,432 additions & 0 deletions 0924.log

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Testing/Temporary/CTestCostData.txt
@@ -0,0 +1 @@
---
3 changes: 3 additions & 0 deletions Testing/Temporary/LastTest.log
@@ -0,0 +1,3 @@
Start testing: Sep 25 08:24 UTC
----------------------------------------------------------
End testing: Sep 25 08:24 UTC
78 changes: 73 additions & 5 deletions cinn/hlir/pe/nn.cc
@@ -37,6 +37,55 @@ Tensor PRelu(const Tensor &A, const Tensor &slope, const int axis, const std::st
output_name);
}

// std::vector<ir::Tensor> Conv2d_NCHW(const ir::Tensor &input,
// const ir::Tensor &weights,
// int pad_h,
// int pad_w,
// int stride_h,
// int stride_w,
// int dilation_h,
// int dilation_w,
// const std::vector<std::vector<int>> &output_shapes,
// const std::string &output_name) {
// CHECK_EQ(4, input->shape.size()) << "Input's dimension of Conv2d op is not 4! Please check.";
// CHECK_EQ(4, weights->shape.size()) << "Weight's dimension of Conv2d op is not 4! Please check.";
// CHECK_EQ(3, output_shapes.size()) << "The size of output_shapes of Conv2d op is not 3! Please check.";
// CHECK_EQ(4, output_shapes[0].size()) << "The size of output_shapes[0] of Conv2d op is not 4! Please check.";
// CHECK_EQ(4, output_shapes[1].size()) << "The size of output_shapes[1] of Conv2d op is not 4! Please check.";
// CHECK_EQ(4, output_shapes[2].size()) << "The size of output_shapes[2] of Conv2d op is not 4! Please check.";
// std::vector<Expr> output_shape{
// Expr(output_shapes[2][0]), Expr(output_shapes[2][1]), Expr(output_shapes[2][2]), Expr(output_shapes[2][3])};
// auto input_pad = Compute(
// {Expr(output_shapes[0][0]), Expr(output_shapes[0][1]), Expr(output_shapes[0][2]), Expr(output_shapes[0][3])},
// [=](Expr nn, Expr cc, Expr yy, Expr xx) {
// auto cond =
// ir::logic_and({yy >= pad_h, yy - pad_h < input->shape[2], xx >= pad_w, xx - pad_w < input->shape[3]});
// return ir::Select::Make(cond, input(nn, cc, yy - pad_h, xx - pad_w), Expr(0.f));
// },
// UniqName("input_pad"));
// auto weights_dilation = Compute(
// {Expr(output_shapes[1][0]), Expr(output_shapes[1][1]), Expr(output_shapes[1][2]), Expr(output_shapes[1][3])},
// [=](Expr nn, Expr cc, Expr yy, Expr xx) {
// auto cond = ir::logic_and({yy % dilation_h == 0, xx % dilation_w == 0});
// return ir::Select::Make(cond, weights(nn, cc, yy / dilation_h, xx / dilation_w), Expr(0.f));
// },
// UniqName("weights_dilation"));

// Var rc(input_pad->shape[1], UniqName("rc"));
// Var ry(weights_dilation->shape[2], UniqName("ry"));
// Var rx(weights_dilation->shape[3], UniqName("rx"));

// auto res = Compute(output_shape,
// [=](Expr nn, Expr ff, Expr yy, Expr xx) {
// return ir::ReduceSum(
// input_pad(nn, rc, yy * stride_h + ry, xx * stride_w + rx) * weights_dilation(ff, rc, ry, rx), Expr(0.f));
// },
// output_name,
// {ry, rx, rc});
// return {input_pad, weights_dilation, res};
// }

std::vector<ir::Tensor> Conv2d_NCHW(const ir::Tensor &input,
const ir::Tensor &weights,
int pad_h,
@@ -51,6 +51,100 @@ std::vector<ir::Tensor> Conv2d_NCHW(const ir::Tensor &input,
CHECK_EQ(4, weights->shape.size()) << "Weight's dimension of Conv2d_NCHW op is not 4! Please check.";
std::vector<Expr> output_shape;
std::vector<Expr> new_weights_shape;
std::vector<Expr> input_pad_shape;
if (output_shapes.size() == 3) {
// already computed by infer_shape
CHECK_EQ(4, output_shapes[0].size()) << "The size of output_shapes[0] of Conv2d op is not 4! Please check.";
@@ -60,6 +110,8 @@ std::vector<ir::Tensor> Conv2d_NCHW(const ir::Tensor &input,
Expr(output_shapes[2][0]), Expr(output_shapes[2][1]), Expr(output_shapes[2][2]), Expr(output_shapes[2][3])};
new_weights_shape = {
Expr(output_shapes[1][0]), Expr(output_shapes[1][1]), Expr(output_shapes[1][2]), Expr(output_shapes[1][3])};
input_pad_shape = {
Expr(output_shapes[0][0]), Expr(output_shapes[0][1]), Expr(output_shapes[0][2]), Expr(output_shapes[0][3])};
} else {
output_shape = {
input->shape[0], // B
@@ -71,10 +123,16 @@ std::vector<ir::Tensor> Conv2d_NCHW(const ir::Tensor &input,
weights->shape[1],
dilation_h * (weights->shape[2] - 1) + 1,
dilation_w * (weights->shape[3] - 1) + 1};
input_pad_shape = {input->shape[0], input->shape[1], input->shape[2] + 2 * pad_h, input->shape[3] + 2 * pad_w};
}
auto input_pad =
(pad_h == 0 && pad_w == 0) ? Identity(input) : Pad(input, {Expr(0), Expr(0), Expr(pad_h), Expr(pad_w)});

auto input_pad = Compute(
input_pad_shape,
[=](Expr nn, Expr cc, Expr yy, Expr xx) {
auto cond =
ir::logic_and({yy >= pad_h, yy - pad_h < input->shape[2], xx >= pad_w, xx - pad_w < input->shape[3]});
return ir::Select::Make(cond, input(nn, cc, yy - pad_h, xx - pad_w), ir::Zero(input->type()));
},
UniqName("input_pad"));
auto weights_dilation = Compute(
new_weights_shape,
[=](Expr nn, Expr cc, Expr yy, Expr xx) {
@@ -120,6 +178,7 @@ std::vector<ir::Tensor> Conv2d_NHWC(const ir::Tensor &input,
CHECK_EQ(4, weights->shape.size()) << "Weight's dimension of Conv2d_NHWC op is not 4! Please check.";
std::vector<Expr> output_shape;
std::vector<Expr> new_weights_shape;
std::vector<Expr> input_pad_shape;
if (output_shapes.size() == 3) {
// already computed by infer_shape
CHECK_EQ(4, output_shapes[0].size()) << "The size of output_shapes[0] of Conv2d op is not 4! Please check.";
@@ -129,6 +188,8 @@ std::vector<ir::Tensor> Conv2d_NHWC(const ir::Tensor &input,
Expr(output_shapes[2][0]), Expr(output_shapes[2][1]), Expr(output_shapes[2][2]), Expr(output_shapes[2][3])};
new_weights_shape = {
Expr(output_shapes[1][0]), Expr(output_shapes[1][1]), Expr(output_shapes[1][2]), Expr(output_shapes[1][3])};
input_pad_shape = {
Expr(output_shapes[0][0]), Expr(output_shapes[0][1]), Expr(output_shapes[0][2]), Expr(output_shapes[0][3])};
} else {
output_shape = {
input->shape[0], // B
@@ -140,9 +201,16 @@ std::vector<ir::Tensor> Conv2d_NHWC(const ir::Tensor &input,
weights->shape[1],
dilation_h * (weights->shape[2] - 1) + 1,
dilation_w * (weights->shape[3] - 1) + 1};
input_pad_shape = {input->shape[0], input->shape[1] + 2 * pad_h, input->shape[2] + 2 * pad_w, input->shape[3]};
}
auto input_pad =
(pad_h == 0 && pad_w == 0) ? Identity(input) : Pad(input, {Expr(0), Expr(pad_h), Expr(pad_w), Expr(0)});
auto input_pad = Compute(
input_pad_shape,
[=](Expr nn, Expr yy, Expr xx, Expr cc) {
auto cond =
ir::logic_and({yy >= pad_h, yy - pad_h < input->shape[1], xx >= pad_w, xx - pad_w < input->shape[2]});
return ir::Select::Make(cond, input(nn, yy - pad_h, xx - pad_w, cc), ir::Zero(input->type()));
},
UniqName("input_pad"));

auto weights_dilation = Compute(
new_weights_shape,
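The substantive change in nn.cc replaces the old conditional padding (Identity when pad_h and pad_w are zero, otherwise Pad) with an explicit input_pad Compute whose Select reads the input when the index falls inside the original extent and yields ir::Zero otherwise, presumably so the padded tensor always exists as a stage the scheduler can manipulate. A minimal NumPy sketch of that select-based padding semantics, assuming NCHW layout; pad_nchw and the test shapes are illustrative, not the CINN API:

import numpy as np

def pad_nchw(x, pad_h, pad_w):
    n, c, h, w = x.shape
    out = np.zeros((n, c, h + 2 * pad_h, w + 2 * pad_w), dtype=x.dtype)
    for yy in range(out.shape[2]):
        for xx in range(out.shape[3]):
            # mirrors ir::logic_and({yy >= pad_h, yy - pad_h < H,
            #                        xx >= pad_w, xx - pad_w < W})
            if pad_h <= yy < h + pad_h and pad_w <= xx < w + pad_w:
                out[:, :, yy, xx] = x[:, :, yy - pad_h, xx - pad_w]
    return out

x = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
assert np.array_equal(pad_nchw(x, 1, 1),
                      np.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1))))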
11 changes: 6 additions & 5 deletions python/tests/conv2d_utils.py
@@ -73,10 +73,10 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):

res_shape = output.shape[1:]
pad_shape = list(input_shape)
dialtion_shape = list(filter_size_new)
dilation_shape = list(filter_size_new)
assert len(padding) == 2
assert len(pad_shape) == 4
assert len(dialtion_shape) == 4
assert len(dilation_shape) == 4
if data_format == "NCHW":
h_index = 2
w_index = 3
@@ -86,11 +86,12 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):

pad_shape[h_index] += 2 * padding[0]
pad_shape[w_index] += 2 * padding[1]
dialtion_shape[h_index] = (filter_size_new[h_index] - 1) * dilation[0] + 1
dialtion_shape[w_index] = (filter_size_new[w_index] - 1) * dilation[1] + 1
dilation_shape[2] = (filter_size_new[2] - 1) * dilation[0] + 1
dilation_shape[3] = (filter_size_new[3] - 1) * dilation[1] + 1

print("pad's shape is:", pad_shape)
print("dilation's shape is:", dilation_shape)
if is_depthwise:
return output, [pad_shape, res_shape]
else:
return output, [pad_shape, dialtion_shape, res_shape]
return output, [pad_shape, dilation_shape, res_shape]
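The edits above fix the dialtion_shape typo and pin the dilated-kernel computation to filter indices 2 and 3, presumably because the filter layout is indexed independently of data_format. A hedged sketch of the same shape bookkeeping, using the dilated-extent formula (k - 1) * d + 1 and 2 * p of padding per spatial dim; conv2d_aux_shapes is a hypothetical helper, not this repo's API:

def conv2d_aux_shapes(input_shape, filter_size, padding, dilation):
    pad_shape = list(input_shape)
    pad_shape[2] += 2 * padding[0]   # padded H (NCHW)
    pad_shape[3] += 2 * padding[1]   # padded W
    dilation_shape = list(filter_size)
    dilation_shape[2] = (filter_size[2] - 1) * dilation[0] + 1
    dilation_shape[3] = (filter_size[3] - 1) * dilation[1] + 1
    return pad_shape, dilation_shape

# a 7x7 kernel at dilation 2 covers a 13x13 window:
expected = ([2, 3, 34, 34], [16, 3, 13, 13])
assert conv2d_aux_shapes([2, 3, 32, 32], [16, 3, 7, 7], [1, 1], [2, 2]) == expected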
6 changes: 3 additions & 3 deletions python/tests/test_op_nn.py
@@ -46,7 +46,7 @@ def init_testcase(self):
self.groups = 1
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [64, f_c, 7, 7]
self.filter_size = [2, f_c, 2, 2]
assert np.mod(self.filter_size[0], self.groups) == 0
self.data_format = "NCHW"
self.attrs = framework.NodeAttr()
@@ -170,9 +170,9 @@ def init_testcase(self):
self.filter_size = [16, f_c, 7, 7]
self.data_format = "NHWC"
self.attrs = framework.NodeAttr()
self.padding = [1, 1]
self.padding = [2, 2]
self.stride = [2, 2]
self.dilation = [1, 1]
self.dilation = [2, 2]
self.attrs.attr_store = {
"stride": self.stride,
"padding": self.padding,
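The new test parameters shrink the NCHW filter to 2x2 and raise padding, stride, and dilation to 2 in the NHWC case, presumably to cut CI time while still exercising every term of the standard output-extent formula. A sketch of that formula; conv_out_extent is a hypothetical helper, not code from this repo:

def conv_out_extent(in_size, k, p, s, d):
    # out = (in + 2*p - dilated_kernel) // s + 1, with dilated_kernel = (k-1)*d + 1
    return (in_size + 2 * p - ((k - 1) * d + 1)) // s + 1

# e.g. the NHWC case above (7x7 kernel, padding 2, stride 2, dilation 2)
# on a hypothetical 28-pixel input dimension:
assert conv_out_extent(28, 7, 2, 2, 2) == 10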
2 changes: 1 addition & 1 deletion python/tests/test_pe_reduction.py
@@ -99,7 +99,7 @@ def reduction_tester(self, fn_name, cinn_fn, np_fn, axes, keep_dims,
stages = create_stages([x.to_tensor()])
if initial:
y = cinn_fn(x.to_tensor(), stages, axes_expr, keep_dims,
ir.Expr(initial))
ir.Expr(initial))
func = lang.lower(func_name, stages, [x.to_tensor(), y])
else:
y = cinn_fn(x.to_tensor(), stages, axes_expr, keep_dims)
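The only change in this file is continuation-line indentation for the optional initial argument. For reference, a reduction with an initial value folds it in as the starting accumulator; shown below with NumPy semantics (the CINN call is the one in the diff above):

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
# each row sum starts from the initial value 10.0
assert np.sum(x, axis=1, initial=10.0).tolist() == [13.0, 22.0]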
