[paddle_test No.28] Replace parts of cc_test with paddle_test #61676

Merged: 23 commits, Feb 22, 2024
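Context for the diffs below: cc_test statically links its DEPS into each test binary, while paddle_test links the test against the shared Paddle library, so every symbol a test calls must be exported from that library. That is why the header hunks in this PR add TEST_API to previously unannotated declarations. A minimal sketch of how such an export macro is commonly defined — not Paddle's exact definition, and PADDLE_DLL_EXPORT is a hypothetical flag used here only for illustration:

#if defined(_WIN32)
#if defined(PADDLE_DLL_EXPORT)  // hypothetical: set while building the DLL
#define TEST_API __declspec(dllexport)  // export the symbol from the DLL
#else
#define TEST_API __declspec(dllimport)  // import it in test binaries
#endif
#else
// On ELF/Mach-O targets, default visibility is enough to expose the symbol.
#define TEST_API __attribute__((visibility("default")))
#endif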
@@ -43,16 +43,17 @@ class GradNodeScale : public GradNodeBase {
   // Functor: perform backward computations
   virtual paddle::small_vector<std::vector<paddle::Tensor>,
                                kSlotSmallVectorSize>
-  operator()(paddle::small_vector<std::vector<paddle::Tensor>,
-                                  kSlotSmallVectorSize>& grads,  // NOLINT
-             bool create_graph = false,
-             bool is_new_grad = false) override;
+  TEST_API
+  operator()(paddle::small_vector<std::vector<paddle::Tensor>,
+                                  kSlotSmallVectorSize>& grads,  // NOLINT
+             bool create_graph = false,
+             bool is_new_grad = false) override;

   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }

   void SetTensorWrappers_X(const std::vector<paddle::Tensor>& tensors);

-  void SetAttributes_scale(float scale);
+  TEST_API void SetAttributes_scale(float scale);
   std::string name() override { return "scale node"; }
   // Members: define fwd input tensors
   // For Scale there is no fwd input tensor needed
8 changes: 4 additions & 4 deletions paddle/fluid/eager/api/utils/hook_utils.h
@@ -22,15 +22,15 @@
 namespace egr {
 namespace egr_utils_api {

-int64_t RegisterGradientHookForTensor(
+TEST_API int64_t RegisterGradientHookForTensor(
     const paddle::Tensor& tensor,
     const std::function<paddle::Tensor(const paddle::Tensor&)>& hook);

-void RegisterReduceHookForTensor(const paddle::Tensor& tensor,
-                                 const std::function<void()>& hook);
+TEST_API void RegisterReduceHookForTensor(const paddle::Tensor& tensor,
+                                          const std::function<void()>& hook);
 TEST_API void RetainGradForTensor(const paddle::Tensor& tensor);

-void RegisterBackwardFinalHook(const std::function<void()>& hook);
+TEST_API void RegisterBackwardFinalHook(const std::function<void()>& hook);

 }  // namespace egr_utils_api
 }  // namespace egr
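Once exported, these hooks are callable from a standalone test binary. A hedged usage sketch, not taken from this PR's tests; the tensor is assumed to be a leaf with autograd metadata already attached:

#include <functional>

#include "paddle/fluid/eager/api/utils/hook_utils.h"

void RegisterHooksExample(const paddle::Tensor& t) {
  // Keep t's gradient buffer alive after backward so a test can inspect it.
  egr::egr_utils_api::RetainGradForTensor(t);

  // Identity gradient hook: receives the incoming grad, returns it unchanged.
  int64_t hook_id = egr::egr_utils_api::RegisterGradientHookForTensor(
      t, [](const paddle::Tensor& grad) { return grad; });
  (void)hook_id;  // presumably identifies the hook for later management

  // Reduce hooks and backward-final hooks are plain callbacks.
  egr::egr_utils_api::RegisterReduceHookForTensor(t, []() {});
  egr::egr_utils_api::RegisterBackwardFinalHook([]() {});
}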
2 changes: 1 addition & 1 deletion paddle/fluid/eager/backward.h
@@ -27,7 +27,7 @@ TEST_API void Backward(const std::vector<paddle::Tensor>& tensors,
                        const std::vector<paddle::Tensor>& grad_tensors,
                        bool retain_graph = false);

-std::vector<paddle::Tensor> Grad(
+TEST_API std::vector<paddle::Tensor> Grad(
     const std::vector<paddle::Tensor>& tensors,
     const std::vector<paddle::Tensor>& inputs,
     const std::vector<paddle::Tensor>& grad_tensors = {},
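A hedged sketch of driving this entry point from a test, assuming (as the neighboring headers suggest) it lives in namespace egr, and that an empty grad_tensors list seeds the backward pass with ones-like gradients:

#include "paddle/fluid/eager/backward.h"

void RunBackwardExample(const paddle::Tensor& out) {
  // out: the output of a differentiable forward pass built beforehand.
  egr::Backward({out}, /*grad_tensors=*/{}, /*retain_graph=*/false);
}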
2 changes: 1 addition & 1 deletion paddle/fluid/eager/grad_node_info.h
@@ -255,7 +255,7 @@ class GradNodeBase {
                       size_t slot_rank);
   void SetGradOutMeta(const std::vector<const paddle::Tensor*>& fwd_in,
                       size_t slot_rank);
-  void SetGradOutMeta(const paddle::Tensor& fwd_in, size_t slot_rank);
+  TEST_API void SetGradOutMeta(const paddle::Tensor& fwd_in, size_t slot_rank);
   void SetGradOutMeta(const paddle::Tensor& fwd_in,
                       const AutogradMeta* fwd_in_other,
                       size_t slot_rank);
39 changes: 20 additions & 19 deletions paddle/fluid/eager/nan_inf_utils.h
@@ -35,41 +35,42 @@ using TupleOfSixTensors =
 using TupleOfTensorAndVector =
     std::tuple<Tensor, std::vector<Tensor>, std::vector<Tensor>>;

-void CheckTensorHasNanOrInf(const std::string& api_name, const Tensor& tensor);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const Tensor& tensor);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const paddle::optional<Tensor>& tensor);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const paddle::optional<Tensor>& tensor);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfTwoTensors& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfTwoTensors& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfThreeTensors& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfThreeTensors& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfFourTensors& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfFourTensors& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfFiveTensors& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfFiveTensors& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfSixTensors& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfSixTensors& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const std::vector<Tensor>& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const std::vector<Tensor>& tensors);

-void CheckTensorHasNanOrInf(
+TEST_API void CheckTensorHasNanOrInf(
     const std::string& api_name,
     const paddle::optional<std::vector<Tensor>>& tensors);

-void CheckTensorHasNanOrInf(const std::string& api_name,
-                            const TupleOfTensorAndVector& tensors);
+TEST_API void CheckTensorHasNanOrInf(const std::string& api_name,
+                                     const TupleOfTensorAndVector& tensors);

 void SetCheckOpList(const std::string& check_op_list);

 void SetSkipOpList(const std::string& skip_op_list);

-void CheckTensorHasNanOrInf(
+TEST_API void CheckTensorHasNanOrInf(
     const std::string& api_name,
     const paddle::small_vector<std::vector<paddle::Tensor>,
                                egr::kSlotSmallVectorSize>& tensors);
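This overload set gives one entry point per result shape, so a test can check any API output uniformly; api_name only labels the diagnostic. A hedged sketch:

#include <vector>

#include "paddle/fluid/eager/nan_inf_utils.h"

void CheckOutputsExample(const paddle::Tensor& out,
                         const std::vector<paddle::Tensor>& outs) {
  egr::CheckTensorHasNanOrInf("scale", out);   // single-tensor overload
  egr::CheckTensorHasNanOrInf("scale", outs);  // vector overload
}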
8 changes: 4 additions & 4 deletions paddle/phi/common/memory_utils.h
@@ -390,11 +390,11 @@ class MemoryUtils {

 namespace memory_utils {

-Allocator::AllocationPtr Alloc(const phi::GPUPlace& place,
-                               size_t size,
-                               const phi::Stream& stream);
+TEST_API Allocator::AllocationPtr Alloc(const phi::GPUPlace& place,
+                                        size_t size,
+                                        const phi::Stream& stream);

-Allocator::AllocationPtr Alloc(const phi::Place& place, size_t size);
+TEST_API Allocator::AllocationPtr Alloc(const phi::Place& place, size_t size);

 std::shared_ptr<Allocation> AllocShared(const phi::Place& place,
                                         size_t size,
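A hedged sketch of a test allocating through the exported helper, assuming phi::CPUPlace converts implicitly to phi::Place and that AllocationPtr releases the buffer on scope exit, as its unique_ptr-style name suggests:

#include "paddle/phi/common/memory_utils.h"

void AllocExample() {
  // 1 KiB host allocation owned by the returned smart pointer.
  phi::Allocator::AllocationPtr buf =
      phi::memory_utils::Alloc(phi::CPUPlace(), 1024);
  (void)buf;  // freed automatically when buf goes out of scope
}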
61 changes: 17 additions & 44 deletions test/cpp/eager/task_tests/CMakeLists.txt
@@ -1,47 +1,20 @@
-cc_test(
-  test_egr_task_nan_inf_utils
-  SRCS nan_inf_utils_test.cc
-  DEPS eager_nan_inf_utils phi common)
+paddle_test(test_egr_task_nan_inf_utils SRCS nan_inf_utils_test.cc DEPS common)

 if(NOT ((NOT WITH_PYTHON) AND ON_INFER))
-  cc_test(
-    test_egr_task_hook
-    SRCS hook_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
-  cc_test(
-    test_egr_task_backward
-    SRCS backward_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
-  cc_test(
-    test_egr_task_grad
-    SRCS grad_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
-  cc_test(
-    test_egr_task_fwd_bwd_joint
-    SRCS fwd_bwd_joint_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
-  cc_test(
-    test_egr_task_cross_batch
-    SRCS cross_batch_accumulation_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
-  cc_test(
-    test_egr_task_hook_intermidiate
-    SRCS hook_test_intermidiate.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} dygraph_node)
-  cc_test(
-    test_egr_task_autocodegen
-    SRCS generated_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps})
-  cc_test(
-    test_egr_task_tensor_utils
-    SRCS tensor_utils_test.cc
-    DEPS ${eager_deps} ${generated_deps})
-  cc_test(
-    test_egr_task_eager_utils
-    SRCS eager_utils_test.cc
-    DEPS ${eager_deps} ${generated_deps})
-  cc_test(
-    test_egr_task_forward_autograd
-    SRCS forward_autograd_test.cc
-    DEPS ${eager_deps} ${fluid_deps} ${generated_deps} eager_scale scale_node)
+  paddle_test(test_egr_task_hook SRCS hook_test.cc)
+  paddle_test(test_egr_task_backward SRCS backward_test.cc)
+  paddle_test(test_egr_task_grad SRCS grad_test.cc)
+  paddle_test(test_egr_task_fwd_bwd_joint SRCS fwd_bwd_joint_test.cc)
+  paddle_test(test_egr_task_cross_batch SRCS cross_batch_accumulation_test.cc)
+  paddle_test(test_egr_task_hook_intermidiate SRCS hook_test_intermidiate.cc)
+  paddle_test(test_egr_task_autocodegen SRCS generated_test.cc)
+  paddle_test(test_egr_task_tensor_utils SRCS tensor_utils_test.cc)
+  paddle_test(test_egr_task_eager_utils SRCS eager_utils_test.cc)
+  paddle_test(test_egr_task_forward_autograd SRCS forward_autograd_test.cc)
 endif()
+
+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime for some c++ test in Windows, since the test will
+  # be build only in CI, so suppose the generator in Windows is Ninja.
+  copy_onnx(test_egr_task_nan_inf_utils)
+endif()
6 changes: 0 additions & 6 deletions test/cpp/eager/task_tests/generated_test.cc
@@ -31,8 +31,6 @@ PD_DECLARE_KERNEL(matmul, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(matmul_grad, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
 PD_DECLARE_KERNEL(add_grad, CPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(sigmoid, CPU, ALL_LAYOUT);
-PD_DECLARE_KERNEL(sigmoid_grad, CPU, ALL_LAYOUT);

 namespace egr {

@@ -145,7 +143,3 @@ TEST(Generated, ElementwiseAdd) {
 }

 }  // namespace egr
-
-USE_OP_ITSELF(sigmoid);
-USE_OP_ITSELF(elementwise_add);
-USE_OP_ITSELF(matmul_v2);
4 changes: 0 additions & 4 deletions test/cpp/eager/task_tests/hook_test_intermidiate.cc
@@ -324,7 +324,3 @@ TEST(Hook_intermidiate, Matmul_v2) {

 TEST(Hook_intermidiate, BackwardFinal) { test_backward_final_hooks(); }
 }  // namespace egr
-
-USE_OP_ITSELF(sigmoid);
-USE_OP_ITSELF(elementwise_add);
-USE_OP_ITSELF(matmul_v2);