[dicp] clang-format code (#433)
jinminxi104 authored Nov 17, 2023
1 parent d777c0d commit 4dc5c1c
Showing 11 changed files with 400 additions and 502 deletions.
3 changes: 0 additions & 3 deletions dicp/.clang-format

This file was deleted.
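With the vendor-local dicp/.clang-format deleted, the reformatting in the files below is the kind of change produced by running clang-format in-place over the touched sources. The exact invocation is not recorded in the commit; assuming the standard workflow, a command along these lines (paths taken from the files shown in this diff, style source assumed) would regenerate a comparable layout:

# Assumed invocation, not recorded in this commit; the style now comes from a
# .clang-format discovered elsewhere in the tree or an explicit -style=<name>.
clang-format -i \
    dicp/dicp/vendor/AscendGraph/codegen/graph_compile.cpp \
    dicp/dicp/vendor/AscendGraph/codegen/graph_utils.h \
    dicp/dicp/vendor/TopsGraph/codegen/include/common_ops.h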

5 changes: 3 additions & 2 deletions dicp/dicp/vendor/AscendGraph/codegen/graph_compile.cpp
@@ -1,6 +1,7 @@
#include "graph_utils.h"

static void compile(const std::string& graph_path, const std::string& graph_json_file) {
static void compile(const std::string& graph_path,
const std::string& graph_json_file) {
std::string graph_name = "BuildGraph";
Graph graph(graph_name.c_str());
std::ifstream f(graph_json_file);
@@ -10,7 +11,7 @@ static void compile(const std::string& graph_path, const std::string& graph_json
std::map<AscendString, AscendString> options;
bool has_dynamic_shape = graph_json["has_dynamic_shape"].get<bool>();
if (has_dynamic_shape) {
for (const auto &item : graph_json["build_options"]) {
for (const auto& item : graph_json["build_options"]) {
auto key = item["name"].get<std::string>();
auto value = item["value"].get<std::string>();
options.insert({AscendString(key.c_str()), AscendString(value.c_str())});
96 changes: 34 additions & 62 deletions dicp/dicp/vendor/AscendGraph/codegen/graph_utils.h
@@ -4,15 +4,14 @@
#include <fstream>
#include <functional>
#include <iostream>
#include <json.hpp>
#include <map>
#include <numeric>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <json.hpp>

#include "all_ops.h"
#include "ascend_string.h"
#include "ge_api.h"
@@ -31,61 +30,44 @@ using json = nlohmann::json;
using namespace ge;

static std::unordered_set<std::string> op_with_dynamic_inputs_outputs = {
"ConcatD",
"IdentityN",
"Pack",
"SplitD"};
"ConcatD", "IdentityN", "Pack", "SplitD"};

void check_op(
std::unordered_map<std::string, ge::Operator>& op_map,
const std::string& op_name) {
void check_op(std::unordered_map<std::string, ge::Operator>& op_map,
const std::string& op_name) {
if (op_map.count(op_name) > 0) {
throw std::runtime_error("op_name duplicated!");
}
}

void setTensorData(
Tensor& tensor,
uint8_t* src_data,
uint64_t data_size,
const std::string& debug_name = "") {
void setTensorData(Tensor& tensor, uint8_t* src_data, uint64_t data_size,
const std::string& debug_name = "") {
auto status = tensor.SetData(reinterpret_cast<uint8_t*>(src_data), data_size);
if (status != ge::GRAPH_SUCCESS) {
std::cout << "Set " << debug_name << " tensor data failed!" << std::endl;
}
}

ge::Tensor genTensor(
const std::vector<int64_t>& tensor_shape,
ge::Format format,
ge::DataType data_type) {
ge::Tensor genTensor(const std::vector<int64_t>& tensor_shape,
ge::Format format, ge::DataType data_type) {
TensorDesc desc(ge::Shape(tensor_shape), format, data_type);
Tensor result(desc);
return result;
}

template <typename T>
ge::Tensor genTensorWithData(
const std::vector<int64_t>& tensor_shape,
ge::Format format,
ge::DataType data_type,
std::vector<T> value) {
ge::Tensor genTensorWithData(const std::vector<int64_t>& tensor_shape,
ge::Format format, ge::DataType data_type,
std::vector<T> value) {
TensorDesc desc(ge::Shape(tensor_shape), format, data_type);
Tensor result(desc);
setTensorData(
result,
reinterpret_cast<uint8_t*>(value.data()),
value.size() * sizeof(T),
"genTensorWithData");
setTensorData(result, reinterpret_cast<uint8_t*>(value.data()),
value.size() * sizeof(T), "genTensorWithData");
return result;
}

ge::Operator genInput(
const std::string op_name,
const std::vector<int64_t> shape,
ge::Format format,
ge::DataType data_type,
int index = -1) {
ge::Operator genInput(const std::string op_name,
const std::vector<int64_t> shape, ge::Format format,
ge::DataType data_type, int index = -1) {
TensorDesc tensor_desc_data_op =
TensorDesc(ge::Shape(shape), format, data_type);
auto op = op::Data(op_name.c_str());
@@ -115,10 +97,8 @@ class AclgraphBuilder {
}
}

void saveGraph(
const std::string& path,
const Graph& graph,
std::map<AscendString, AscendString>& options) {
void saveGraph(const std::string& path, const Graph& graph,
std::map<AscendString, AscendString>& options) {
ModelBufferData model;

auto status = aclgrphBuildModel(graph, options, model);
@@ -159,10 +139,8 @@ ge::Format get_ascend_format(const std::string& format) {

ge::DataType get_ascend_datatype(const std::string& data_type) {
static std::unordered_map<std::string, ge::DataType> datatype_map = {
{"FLOAT", ge::DataType::DT_FLOAT},
{"FLOAT16", ge::DataType::DT_FLOAT16},
{"INT32", ge::DataType::DT_INT32},
{"INT64", ge::DataType::DT_INT64},
{"FLOAT", ge::DataType::DT_FLOAT}, {"FLOAT16", ge::DataType::DT_FLOAT16},
{"INT32", ge::DataType::DT_INT32}, {"INT64", ge::DataType::DT_INT64},
{"BOOL", ge::DataType::DT_BOOL},
};
if (datatype_map.count(data_type) > 0) {
@@ -177,10 +155,8 @@ T genDynamicOp(const std::string& op_name) {
}

template <typename T>
void parseDynamicInput(
std::unordered_map<std::string, ge::Operator>& op_map,
T& op,
const json& node) {
void parseDynamicInput(std::unordered_map<std::string, ge::Operator>& op_map,
T& op, const json& node) {
if (node.contains("dynamic_inputs")) {
for (const auto& i : node["dynamic_inputs"]) {
auto num = i["num"].get<unsigned int>();
@@ -191,8 +167,8 @@ void parseDynamicInput(
auto index = item["index"].get<uint32_t>();
auto value = op_map[item["value"].get<std::string>()];
if (item.contains("edge")) {
op.set_dynamic_input_x(
index, value, item["edge"].get<std::string>().c_str());
op.set_dynamic_input_x(index, value,
item["edge"].get<std::string>().c_str());
} else {
op.set_dynamic_input_x(index, value);
}
@@ -220,8 +196,7 @@ void parseDynamicOutput(T& op, const json& node) {
}

ge::Operator genDynamicOperator(
std::unordered_map<std::string, ge::Operator>& op_map,
const json& node) {
std::unordered_map<std::string, ge::Operator>& op_map, const json& node) {
auto op_type = node["op_type"].get<std::string>();
auto op_name = node["op_name"].get<std::string>();
if (op_type == "ConcatD") {
@@ -245,10 +220,8 @@ ge::Operator genDynamicOperator(
throw std::runtime_error("invalid dynamic opeartor!");
}

void parseCommonNode(
std::unordered_map<std::string, ge::Operator>& op_map,
ge::Operator& op,
const json& node) {
void parseCommonNode(std::unordered_map<std::string, ge::Operator>& op_map,
ge::Operator& op, const json& node) {
if (node.contains("inputs")) {
for (const auto& i : node["inputs"]) {
auto name = i["name"].get<std::string>().c_str();
@@ -260,13 +233,12 @@ void parseCommonNode(
auto format = desc["format"].get<std::string>();
auto data_type = desc["data_type"].get<std::string>();
auto shape = desc["shape"].get<std::vector<int64_t>>();
TensorDesc tensor_desc = TensorDesc(
ge::Shape(shape),
get_ascend_format(format),
get_ascend_datatype(data_type));
TensorDesc tensor_desc =
TensorDesc(ge::Shape(shape), get_ascend_format(format),
get_ascend_datatype(data_type));
auto output_name = desc["output_name"].get<std::string>();
op_map[i["value"].get<std::string>()].UpdateOutputDesc(
output_name.c_str(), tensor_desc);
output_name.c_str(), tensor_desc);
op.SetInput(name, value);
} else {
op.SetInput(name, value);
@@ -353,8 +325,8 @@ void buildGraph(Graph& graph, const json& graph_json) {
if (op_with_dynamic_inputs_outputs.count(op_type) > 0) {
op_map[node_name] = genDynamicOperator(op_map, node);
} else {
op_map[node_name] = ge::OperatorFactory::CreateOperator(
node_name.c_str(), op_type.c_str());
op_map[node_name] = ge::OperatorFactory::CreateOperator(node_name.c_str(),
op_type.c_str());
}
parseCommonNode(op_map, op_map[node_name], node);
graph.AddOp(op_map[node_name]);
@@ -370,4 +342,4 @@
graph.SetInputs(graph_inputs).SetOutputs(graph_outputs);
}

#endif // DAVINCI_GRAPH_UTILS_H
#endif // DAVINCI_GRAPH_UTILS_H
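For orientation, the helpers reformatted above are the building blocks that buildGraph composes into a ge::Graph from the JSON description. The sketch below is illustrative only and not part of this commit: the graph name, shapes, and the choice of an Add node are assumptions, while check_op, genInput, genTensorWithData, OperatorFactory::CreateOperator, AddOp, and SetInputs/SetOutputs are the calls visible in the diff.

#include <vector>

#include "graph_utils.h"

// Hypothetical usage sketch (not from this commit): build a tiny graph that
// adds a constant to a single input, using the helpers from graph_utils.h.
static Graph buildDemoGraph() {
  Graph graph("DemoGraph");
  std::unordered_map<std::string, ge::Operator> op_map;

  // One graph input: a 2x3 float tensor bound to parameter index 0.
  check_op(op_map, "x");
  op_map["x"] = genInput("x", {2, 3}, ge::FORMAT_ND, ge::DataType::DT_FLOAT, 0);

  // A constant operand whose payload is filled in through genTensorWithData.
  check_op(op_map, "ones");
  auto ones_tensor = genTensorWithData<float>(
      {2, 3}, ge::FORMAT_ND, ge::DataType::DT_FLOAT,
      std::vector<float>(6, 1.0f));
  op_map["ones"] = op::Const("ones").set_attr_value(ones_tensor);

  // An elementwise Add created through the operator factory, mirroring the
  // CreateOperator call in buildGraph; "x1"/"x2" are the Add op's input names.
  check_op(op_map, "add");
  op_map["add"] = ge::OperatorFactory::CreateOperator("add", "Add");
  op_map["add"].SetInput("x1", op_map["x"]);
  op_map["add"].SetInput("x2", op_map["ones"]);

  for (auto& kv : op_map) {
    graph.AddOp(kv.second);
  }
  std::vector<ge::Operator> graph_inputs = {op_map["x"]};
  std::vector<ge::Operator> graph_outputs = {op_map["add"]};
  graph.SetInputs(graph_inputs).SetOutputs(graph_outputs);
  return graph;
}

In the real codegen path this structure is driven by the JSON emitted on the Python side, which parseCommonNode and genDynamicOperator above translate node by node.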
101 changes: 45 additions & 56 deletions dicp/dicp/vendor/TopsGraph/codegen/include/common_ops.h
@@ -1,41 +1,34 @@
#pragma once

#include <memory>
#include <vector>
#include <string>
#include <limits>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <fstream>
#include <iostream>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "dtu_utils.h"

namespace enflame {
builder::Op Gather(
std::shared_ptr<builder::Builder> tmp_builder,
builder::Op input,
builder::Op index,
const int64_t dim,
builder::Type gather_type);

builder::Op ViewAsComplex(
std::shared_ptr<builder::Builder> hlir_builder,
builder::Op input,
const std::vector<int64_t> shape);

builder::Op ViewAsReal(
std::shared_ptr<builder::Builder> hlir_builder,
builder::Op input,
const std::vector<int64_t> shape);

builder::Op ComplexMul(
std::shared_ptr<builder::Builder> hlir_builder,
builder::Op lhs,
builder::Op rhs);

static void PadToSize(builder::Op& operand, const std::vector<int64_t>& target_shape, builder::Op& output, builder::Op& pad_value) {
builder::Op Gather(std::shared_ptr<builder::Builder> tmp_builder,
builder::Op input, builder::Op index, const int64_t dim,
builder::Type gather_type);

builder::Op ViewAsComplex(std::shared_ptr<builder::Builder> hlir_builder,
builder::Op input, const std::vector<int64_t> shape);

builder::Op ViewAsReal(std::shared_ptr<builder::Builder> hlir_builder,
builder::Op input, const std::vector<int64_t> shape);

builder::Op ComplexMul(std::shared_ptr<builder::Builder> hlir_builder,
builder::Op lhs, builder::Op rhs);

static void PadToSize(builder::Op& operand,
const std::vector<int64_t>& target_shape,
builder::Op& output, builder::Op& pad_value) {
bool has_padding = false;
auto operand_shape = operand.GetType().GetShape();
std::vector<int64_t> edge_padding_low;
@@ -49,24 +42,21 @@ static void PadToSize(builder::Op& operand, const std::vector<int64_t>& target_s
has_padding = has_padding || diff_in_high != 0;
}
if (has_padding) {
output = builder::Pad(operand, pad_value, 0, edge_padding_low, edge_padding_high, interior_padding);
output = builder::Pad(operand, pad_value, 0, edge_padding_low,
edge_padding_high, interior_padding);
} else {
std::cout << "No need padding to size, weird!" << std::endl;
output = operand;
}
}

template<typename T>
builder::Op Scatter(
std::shared_ptr<builder::Builder> hlir_builder,
builder::Op& self,
const int64_t dim,
builder::Op& index,
const T scalar_value) {

template <typename T>
builder::Op Scatter(std::shared_ptr<builder::Builder> hlir_builder,
builder::Op& self, const int64_t dim, builder::Op& index,
const T scalar_value) {
builder::PrimitiveType src_dtype = self.GetType().GetPrimitiveType();
builder::Op src = builder::FullLike(index, scalar_value, src_dtype);

auto neg_inf = std::numeric_limits<T>::lowest();
auto self_shape = self.GetType().GetShape();
auto index_shape = index.GetType().GetShape();
@@ -90,7 +80,7 @@ builder::Op Scatter(
builder::Op mask = builder::Equal(
builder::BroadcastInDim(index, index_broadcast_dims, mask_type),
builder::Iota(hlir_builder, dim, mask_type));

builder::Type selected_src_type(sizes, src.GetType().GetPrimitiveType());
builder::Op selected_src = builder::Select(
mask,
@@ -105,22 +95,26 @@ builder::Op Scatter(
auto max_res = builder::Max(max_lhs, max_rhs);
hlir_builder->SetOutput({max_res}, "binary_max");

builder::Op scalar_neg_inf = builder::Const(hlir_builder, neg_inf, builder::Type(self.GetType().GetPrimitiveType()));
builder::Op scalar_neg_inf = builder::Const(
hlir_builder, neg_inf, builder::Type(self.GetType().GetPrimitiveType()));
builder::Op reduced_selected_src = builder::Reduce(
{selected_src}, {scalar_neg_inf}, {dim + 1}, {"binary_max"});

// add func binary_or
hlir_builder->AddFunc("binary_or");
builder::Type bool_scalar_type(builder::PrimitiveType::PRED());
auto binary_or_arg0 = hlir_builder->CreateInput(bool_scalar_type, "binary_or");
auto binary_or_arg1 = hlir_builder->CreateInput(bool_scalar_type, "binary_or");
auto binary_or_arg0 =
hlir_builder->CreateInput(bool_scalar_type, "binary_or");
auto binary_or_arg1 =
hlir_builder->CreateInput(bool_scalar_type, "binary_or");
auto binary_or_result = builder::Or(binary_or_arg0, binary_or_arg1);
hlir_builder->SetOutput({binary_or_result}, "binary_or");

builder::Op scalar_false = builder::Const(hlir_builder, false, builder::Type(builder::PrimitiveType::PRED()));
builder::Op scalar_false = builder::Const(
hlir_builder, false, builder::Type(builder::PrimitiveType::PRED()));
builder::Op reduced_mask =
builder::Reduce({mask}, {scalar_false}, {dim + 1}, {"binary_or"});

// check whether scatter result requires padding
bool requires_padding = false;
for (size_t i = 0; i < self_shape.size(); ++i) {
@@ -136,23 +130,18 @@ builder::Op Scatter(
}

if (requires_padding) {
PadToSize(reduced_selected_src, self_shape, reduced_selected_src, scalar_neg_inf);
PadToSize(reduced_selected_src, self_shape, reduced_selected_src,
scalar_neg_inf);
PadToSize(reduced_mask, self_shape, reduced_mask, scalar_false);
}

builder::Op res = builder::Select(reduced_mask, reduced_selected_src, self);
return res;
}

builder::Op BatchNorm(
std::shared_ptr<builder::Builder> hlir_builder,
builder::Op& input,
builder::Op& weight,
builder::Op& bias,
builder::Op& running_mean,
builder::Op& running_var,
int64_t channel_dim,
bool training,
double momentum,
double eps);
builder::Op BatchNorm(std::shared_ptr<builder::Builder> hlir_builder,
builder::Op& input, builder::Op& weight,
builder::Op& bias, builder::Op& running_mean,
builder::Op& running_var, int64_t channel_dim,
bool training, double momentum, double eps);
} // namespace enflame