This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

Add model MobileNetV1, ResNet50 and SqueezeNet #441

Open: wants to merge 4 commits into develop.
build.sh (5 changes: 4 additions & 1 deletion)
@@ -94,7 +94,7 @@ function cmake_ {
function _download_and_untar {
local tar_file=$1
if [[ ! -f $tar_file ]]; then
wget http://paddle-inference-dist.bj.bcebos.com/CINN/$tar_file
wget https://paddle-inference-dist.bj.bcebos.com/CINN/$tar_file
tar -xvf $tar_file
fi
}
@@ -105,6 +105,9 @@ function prepare_model {
_download_and_untar ResNet18.tar
_download_and_untar MobileNetV2.tar
_download_and_untar EfficientNet.tar
_download_and_untar MobilenetV1.tar
_download_and_untar ResNet50.tar
_download_and_untar SqueezeNet.tar

mkdir -p $build_dir/paddle
cd $build_dir/paddle
cinn/backends/codegen_cuda_dev_test.cc (2 changes: 1 addition & 1 deletion; mode 100644 → 100755)
@@ -2340,7 +2340,7 @@ TEST(Cudnn, external_function_cudnn2) {
dev_bufs[1]->memory = reinterpret_cast<uint8_t*>(B_dev);

runtime::cuda::cinn_gpu_cudnn_pool2d(
{2, 64, 112, 112, 3, 3, 1, 1, 1, 1, 2, 2, 2, 64, 56, 56}, {"max"}, dev_bufs[0], dev_bufs[1]);
{2, 64, 112, 112, 3, 3, 1, 1, 1, 1, 2, 2, 2, 64, 56, 56, 0}, {"max"}, dev_bufs[0], dev_bufs[1]);
}

TEST(Cudnn, external_function_cudnn3) {
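For reference, the extra trailing 0 in the attribute vector above is the new adaptive flag threaded through by this PR (see graph_compiler.cc below). The grouping of the 17 entries sketched here is an inference from the test values, not something this diff spells out:

// Inferred layout of the 17 pool2d runtime attributes (an assumption
// based on the test call above; only the final adaptive flag is new).
#include <vector>

const std::vector<int> pool2d_attrs = {
    2, 64, 112, 112,  // input shape  (N, C, H, W)
    3, 3,             // kernel size  (k_h, k_w)
    1, 1, 1, 1,       // padding      (top, left, bottom, right)
    2, 2,             // strides      (s_h, s_w)
    2, 64, 56, 56,    // output shape (N, C, H, W)
    0,                // adaptive flag: 0 = regular, 1 = adaptive pooling
};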
cinn/frontend/paddle_model_to_program.cc (56 changes: 56 additions & 0 deletions; mode 100644 → 100755)
@@ -116,6 +116,60 @@ void PaddleModelToProgram::AddOpMapper_mul() {
};
}

void PaddleModelToProgram::AddOpMapper_matmul() {
op_mappers_["matmul"] = [&](const paddle::cpp::OpDesc& op_desc) {
CHECK_EQ(op_desc.Input("X").size(), 1UL);
auto x_name = op_desc.Input("X").front();
CHECK_EQ(op_desc.Input("Y").size(), 1UL);
auto y_name = op_desc.Input("Y").front();
auto x = GetVar(utils::TransValidVarName(x_name));
auto y = GetVar(utils::TransValidVarName(y_name));
bool trans_a = op_desc.GetAttr<bool>("transpose_X");
bool trans_b = op_desc.GetAttr<bool>("transpose_Y");
float alpha = op_desc.GetAttr<float>("alpha");
VLOG(4) << "x shape: " << utils::Join(x->shape, ",");
VLOG(4) << "y shape: " << utils::Join(y->shape, ",");
auto out = program_->matmul(x, y, trans_a, trans_b, alpha);
CHECK_EQ(op_desc.Output("Out").size(), 1UL);
auto out_name = op_desc.Output("Out").front();
AddVar(utils::TransValidVarName(out_name), out);
var_model_to_program_map_[out_name] = out->id;
};
}

void PaddleModelToProgram::AddOpMapper_reshape2() {
op_mappers_["reshape2"] = [&](const paddle::cpp::OpDesc& op_desc) {
Collaborator: reshape2? Rename this; same below.

Author: done, thanks
CHECK_EQ(op_desc.Input("X").size(), 1UL);
auto x_name = op_desc.Input("X").front();
auto x = GetVar(utils::TransValidVarName(x_name));
std::vector<int> shape = op_desc.GetAttr<std::vector<int>>("shape");
VLOG(4) << "x shape: " << utils::Join(x->shape, ",");
auto out = program_->reshape(x, shape);
CHECK_EQ(op_desc.Output("Out").size(), 1UL);
auto out_name = op_desc.Output("Out").front();
AddVar(utils::TransValidVarName(out_name), out);
var_model_to_program_map_[out_name] = out->id;
};
}

void PaddleModelToProgram::AddOpMapper_concat() {
op_mappers_["concat"] = [&](const paddle::cpp::OpDesc& op_desc) {
// Currently only supports the case of exactly two input tensors.
CHECK_EQ(op_desc.Input("X").size(), 2UL);
auto x_name = op_desc.Input("X")[0];
auto x = GetVar(utils::TransValidVarName(x_name));
auto y_name = op_desc.Input("X")[1];
auto y = GetVar(utils::TransValidVarName(y_name));
int axis = op_desc.GetAttr<int>("axis");
VLOG(4) << "axis in op concat is : " << axis;
auto out = program_->concat(x, y, axis);
CHECK_EQ(op_desc.Output("Out").size(), 1UL);
auto out_name = op_desc.Output("Out").front();
AddVar(utils::TransValidVarName(out_name), out);
var_model_to_program_map_[out_name] = out->id;
};
}
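Because the mapper above CHECKs for exactly two inputs, Paddle graphs with an N-way concat would currently be rejected. A hedged sketch of how the mapper body could fold N inputs through the binary Program::concat added by this PR (a hypothetical extension, not part of the diff):

// Hypothetical N-input variant of the mapper body (not part of this PR):
// fold the inputs left-to-right through the binary Program::concat.
CHECK_GE(op_desc.Input("X").size(), 2UL);
int axis = op_desc.GetAttr<int>("axis");
auto out = GetVar(utils::TransValidVarName(op_desc.Input("X")[0]));
for (size_t i = 1; i < op_desc.Input("X").size(); ++i) {
  auto next = GetVar(utils::TransValidVarName(op_desc.Input("X")[i]));
  out = program_->concat(out, next, axis);
}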

void PaddleModelToProgram::AddOpMapper_relu() {
op_mappers_["relu"] = [&](const paddle::cpp::OpDesc& op_desc) {
CHECK_EQ(op_desc.Input("X").size(), 1UL);
@@ -318,6 +372,8 @@ void PaddleModelToProgram::AddOpMapper_pool2d() {
attrs["data_format"] = op_desc.GetAttr<std::string>("data_format");
CHECK(op_desc.HasAttr("global_pooling"));
attrs["global_pooling"] = op_desc.GetAttr<bool>("global_pooling");
CHECK(op_desc.HasAttr("adaptive"));
attrs["adaptive"] = op_desc.GetAttr<bool>("adaptive");

auto x = GetVar(TransValidVarName(x_name));
auto out = program_->pool2d(x, attrs);
cinn/frontend/paddle_model_to_program.h (6 changes: 6 additions & 0 deletions)
@@ -44,6 +44,9 @@ class PaddleModelToProgram {
AddOpMapper_sigmoid();
AddOpMapper_slice();
AddOpMapper_dropout_infer();
AddOpMapper_matmul();
AddOpMapper_reshape2();
AddOpMapper_concat();
}

std::unique_ptr<Program> operator()(const std::string& model_dir, bool is_combined);
@@ -68,6 +71,9 @@ class PaddleModelToProgram {
void AddOpMapper_sigmoid();
void AddOpMapper_slice();
void AddOpMapper_dropout_infer();
void AddOpMapper_matmul();
void AddOpMapper_reshape2();
void AddOpMapper_concat();
// @}

const std::unordered_map<std::string, Variable>& var_map() const { return var_map_; }
cinn/frontend/syntax.cc (23 changes: 23 additions & 0 deletions)
@@ -232,6 +232,29 @@ Variable Program::mul(const Variable& a, const Variable& b, int x_num_col_dims,
return instr.GetOutput(0);
}

Variable Program::matmul(const Variable& a, const Variable& b, bool trans_a, bool trans_b, float alpha) {
Instruction instr("matmul", {a, b});
instr.SetAttr("trans_a", trans_a);
instr.SetAttr("trans_b", trans_b);
instr.SetAttr("alpha", alpha);
AppendInstruction(instr);
return instr.GetOutput(0);
}

Variable Program::reshape(const Variable& a, const std::vector<int>& shape) {
Instruction instr("reshape", {a});
instr.SetAttr("shape", shape);
AppendInstruction(instr);
return instr.GetOutput(0);
}

Variable Program::concat(const Variable& a, const Variable& b, int axis) {
Instruction instr("concat", {a, b});
instr.SetAttr("axis", axis);
AppendInstruction(instr);
return instr.GetOutput(0);
}

Variable Program::mulbias(
const Variable& a, const Variable& b, const Variable& c, int x_num_col_dims, int y_num_col_dims) {
Instruction instr("mulbias", {a, b, c});
cinn/frontend/syntax.h (22 changes: 22 additions & 0 deletions; mode 100644 → 100755)
@@ -173,6 +173,28 @@ struct Program {
*/
Variable mul(const Variable& a, const Variable& b, int x_num_col_dims = 1, int y_num_col_dims = 1);

/**
* Multiply two matrices.
*/
Variable matmul(const Variable& a, const Variable& b, bool trans_a = false, bool trans_b = false, float alpha = 1);

/**
* Reshape a tensor.
* @param a The input tensor.
* @param shape The target shape of the output tensor.
* @return The reshaped output tensor.
*/
Collaborator: Make the comments more specific? Same below.

Author: done
Variable reshape(const Variable& a, const std::vector<int>& shape);

/**
* Concatenate two tensors.
* @param a The first input tensor.
* @param b The second input tensor.
* @param axis The axis along which to concatenate.
* @return The concatenated output tensor.
*/
Variable concat(const Variable& a, const Variable& b, int axis = 0);

/**
* Multiply two matrices and add a bias.
*/
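Taken together, the three new frontend ops compose like the existing Program calls. A minimal usage sketch (shapes and the BuildTail helper are illustrative; x and w are assumed to come from the usual frontend input machinery):

#include "cinn/frontend/syntax.h"

using cinn::frontend::Program;
using cinn::frontend::Variable;

// x: [16, 256], w: [512, 256] -- illustrative shapes.
Variable BuildTail(Program* net, const Variable& x, const Variable& w) {
  // y = 1.0f * (x * w^T) -> [16, 512]
  auto y = net->matmul(x, w, /*trans_a=*/false, /*trans_b=*/true, /*alpha=*/1.0f);
  // Reshape to [16, 2, 256] (same element count: 16 * 512 = 8192).
  auto r = net->reshape(y, {16, 2, 256});
  // Concatenate with itself along axis 1 -> [16, 4, 256].
  return net->concat(r, r, /*axis=*/1);
}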
cinn/hlir/framework/graph_compiler.cc (10 changes: 9 additions & 1 deletion)
@@ -428,6 +428,7 @@ std::vector<std::unique_ptr<Instruction>> GraphCompiler::BuildInstructions() {
if (node->attrs.attr_store.find("padding_size") != node->attrs.attr_store.end()) {
if (global_pooling == false) {
auto stride = std::get<std::vector<int>>(node->attrs.attr_store.at("padding_size"));
CHECK_EQ(stride.size(), 4UL);
instr->attrs.insert(instr->attrs.end(), stride.begin(), stride.end());
} else {
instr->attrs.push_back(0);
@@ -443,7 +444,14 @@ std::vector<std::unique_ptr<Instruction>> GraphCompiler::BuildInstructions() {
auto out_shape = shape_dict.at(out_id);
instr->attrs.insert(instr->attrs.end(), out_shape.begin(), out_shape.end());
}
CHECK_EQ(instr->attrs.size(), 16UL);
if (node->attrs.attr_store.find("adaptive") != node->attrs.attr_store.end()) {
bool adaptive = std::get<bool>(node->attrs.attr_store.at("adaptive"));
if (adaptive)
instr->attrs.push_back(1);
else
instr->attrs.push_back(0);
}
CHECK_EQ(instr->attrs.size(), 17UL);
CHECK_EQ(instr->str_attrs.size(), 1UL);
} else if (node->op()->name == "softmax") {
auto& shape_dict = graph_->GetAttrs<std::unordered_map<std::string, shape_t>>("infershape");
cinn/hlir/op/broadcast.cc (2 changes: 2 additions & 0 deletions)
@@ -96,6 +96,8 @@ std::vector<shape_t> InferShapeForBroadcast(const std::vector<shape_t> &inputs_s
break;
}
}
VLOG(3) << "broadcast input shapes are : " << utils::Join(inputs_shape[0], ", ") << "; "
<< utils::Join(inputs_shape[1], ", ");
pe::GetBroadcastOutShape(inputs_shape[0], inputs_shape[1], &out_shape, axis);
VLOG(3) << "broadcast out shape: " << utils::Join(out_shape, ", ");
return {out_shape};
cinn/hlir/op/nn.cc (17 changes: 17 additions & 0 deletions; mode 100644 → 100755)
@@ -1085,6 +1085,7 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(const framework::NodeAttr &attrs,
bool ceil_mode = false;
bool exclusive = true;
bool global_pooling = false;
bool adaptive = false;
std::string data_format = "NCHW";
for (auto &iter : attrs.attr_store) {
if (iter.first == "kernel_size") {
@@ -1103,6 +1104,8 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(const framework::NodeAttr &attrs,
data_format = std::get<std::string>(iter.second);
} else if (iter.first == "global_pooling") {
global_pooling = std::get<bool>(iter.second);
} else if (iter.first == "adaptive") {
adaptive = std::get<bool>(iter.second);
}
}
CHECK(!kernel_size.empty()) << "kernel_size for pool2d is empty. Please check.\n";
@@ -1131,6 +1134,9 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(const framework::NodeAttr &attrs,
kernel_size = {A_tensor->shape[height_index].as_int32(), A_tensor->shape[width_index].as_int32()};
padding_size = {0, 0, 0, 0};
}
if (kernel_size.size() == padding_size.size()) {
// Expand 2-value padding (pad_h, pad_w) to 4 values (top, left, bottom, right);
// copy first, since inserting a range from the same vector is unsafe.
std::vector<int> pad_copy = padding_size;
padding_size.insert(padding_size.end(), pad_copy.begin(), pad_copy.end());
}

auto out = pe::Pool2d(A_tensor,
kernel_size,
Expand All @@ -1140,6 +1146,7 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(const framework::NodeAttr &attrs,
ceil_mode,
exclusive,
data_format,
adaptive,
UniqName("T_Pool2d_out"));

auto stages = CreateStages({A_tensor});
@@ -1194,6 +1201,7 @@ std::vector<std::vector<int>> InferShapeForPool2d(const std::vector<std::vector<
bool exclusive = true;
std::string data_format = "NCHW";
bool global_pooling = false;
bool adaptive = false;
for (auto &iter : attrs.attr_store) {
if (iter.first == "kernel_size") {
kernel_size = std::get<std::vector<int>>(iter.second);
@@ -1209,6 +1217,8 @@ std::vector<std::vector<int>> InferShapeForPool2d(const std::vector<std::vector<
global_pooling = std::get<bool>(iter.second);
} else if (iter.first == "data_format") {
data_format = std::get<std::string>(iter.second);
} else if (iter.first == "adaptive") {
adaptive = std::get<bool>(iter.second);
}
}
CHECK_EQ(kernel_size.size(), 2U) << "kernel size for pool2d should be 2.\n";
@@ -1252,6 +1262,13 @@ std::vector<std::vector<int>> InferShapeForPool2d(const std::vector<std::vector<
(inputs_shape[0][width_axis] - kernel_size[1] + padding_size[1] + padding_size[3]) / stride_size[1] + 1;
}

if (adaptive) {
kernel_size = std::get<std::vector<int>>(attr_store["kernel_size"]);
if (kernel_size.size() == 1UL) kernel_size.push_back(kernel_size[0]);
CHECK(kernel_size.size() >= 2UL) << "In pool2d, kernel_size's size should be >= 2, please check!";
output_shape1[height_axis] = kernel_size[0];
output_shape1[width_axis] = kernel_size[1];
}
std::vector<std::vector<int>> res{output_shape1};
return res;
}
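The adaptive branch above pins the output spatial size to kernel_size regardless of the input, while the regular path keeps the sliding-window formula. A small self-contained restatement of the two rules (function and parameter names are illustrative):

#include <vector>

// Output H/W of pool2d, restating the two rules in InferShapeForPool2d.
std::vector<int> PoolOutHW(int in_h, int in_w,
                           const std::vector<int>& kernel,  // {k_h, k_w}
                           const std::vector<int>& stride,  // {s_h, s_w}
                           const std::vector<int>& pad,     // {top, left, bottom, right}
                           bool adaptive) {
  if (adaptive) {
    // Adaptive pooling: the requested kernel_size is the output size.
    return {kernel[0], kernel[1]};
  }
  // Regular pooling (floor division, as in the formula above).
  int out_h = (in_h - kernel[0] + pad[0] + pad[2]) / stride[0] + 1;
  int out_w = (in_w - kernel[1] + pad[1] + pad[3]) / stride[1] + 1;
  return {out_h, out_w};
}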