[phi] Move sequence_pool to phi - Step 3: sequence_pool_grad_op (#52680)
* [phi] move sequence_pool kernel to phi

* mv kernels impl

* fix parameter error

* clean include

* fix compat filename

* [phi] move fluid sequence_pool_grad to phi

* [phi][compat] sig rm GradVarName

* [phi] fix sequence_pool out type

* [phi] rm impl, add const string

* [phi] fix const str

* fix sequence_pooling cmake

* [phi] mv sequence_pooling_test

* [phi] fix grad sig

* [phi] fix sequence_pool is_test error

* [phi] fix sequence_pooling gpu include

* [phi] mv to impl

* [phi] fix SequencePoolFunctor cu include

* [phi] modify out max_index int32_t

* [phi] add pooltype mapping determine

* [phi] fix sequence_pool_sig

* [phi] fix sequence_pool_sig sum

* [phi] try ci

* [phi] fix max_index optional
gouzil authored Apr 27, 2023
1 parent 182b6f8 commit fe05339
Showing 10 changed files with 116 additions and 68 deletions.
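
For context on what this operator computes (the code below is not from the PR; it is an illustrative sketch with made-up helper names): sequence_pool reduces each variable-length sequence of a LoD tensor to one output row, and sequence_pool_grad distributes the output-row gradient back over that sequence's input rows. For SUM pooling, the backward pass simply copies each output gradient to every timestep of its sequence:

```cpp
#include <cstddef>
#include <vector>

// Illustrative SUM-pooling forward/backward over a LoD layout, independent of
// the actual phi::funcs::SequencePoolFunctor / SequencePoolGradFunctor code.
// `lod` holds sequence offsets, e.g. {0, 2, 5} means rows [0,2) and [2,5);
// `width` is the feature width of every row.
void SumPoolForward(const std::vector<size_t>& lod, int width,
                    const std::vector<float>& x, std::vector<float>* out) {
  const size_t num_seqs = lod.size() - 1;
  out->assign(num_seqs * width, 0.0f);
  for (size_t s = 0; s < num_seqs; ++s)
    for (size_t r = lod[s]; r < lod[s + 1]; ++r)
      for (int w = 0; w < width; ++w)
        (*out)[s * width + w] += x[r * width + w];
}

// d(out[s]) / d(x[r]) == 1 for every row r of sequence s, so each output-row
// gradient is broadcast to all input rows of that sequence.
void SumPoolBackward(const std::vector<size_t>& lod, int width,
                     const std::vector<float>& out_grad,
                     std::vector<float>* x_grad) {
  x_grad->assign(lod.back() * width, 0.0f);
  for (size_t s = 0; s + 1 < lod.size(); ++s)
    for (size_t r = lod[s]; r < lod[s + 1]; ++r)
      for (int w = 0; w < width; ++w)
        (*x_grad)[r * width + w] = out_grad[s * width + w];
}
```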
1 change: 0 additions & 1 deletion paddle/fluid/operators/math/CMakeLists.txt
@@ -14,7 +14,6 @@ math_library(sample_prob)
math_library(sampler DEPS generator)

# math_library(math_function DEPS blas dense_tensor tensor)

if(WITH_XPU)
math_library(beam_search DEPS math_function beam_search_xpu)
else()
12 changes: 1 addition & 11 deletions paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
@@ -12,10 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h"
-
-#include <memory>
-#include <string>
+#include "paddle/fluid/framework/op_registry.h"

 namespace paddle {
 namespace operators {
@@ -196,10 +193,3 @@ REGISTER_OPERATOR(sequence_pool,
 REGISTER_OPERATOR(sequence_pool_grad,
                   ops::SequencePoolGradOp,
                   ops::SequencePoolGradOpNoNeedBufferVarsInferer);
-
-PD_REGISTER_STRUCT_KERNEL(sequence_pool_grad,
-                          CPU,
-                          ALL_LAYOUT,
-                          ops::SequencePoolGradKernel,
-                          float,
-                          double) {}
49 changes: 0 additions & 49 deletions paddle/fluid/operators/sequence_ops/sequence_pool_op.h

This file was deleted.

1 change: 0 additions & 1 deletion paddle/fluid/operators/sequence_ops/unity_build_rule.cmake
@@ -30,7 +30,6 @@ register_unity_group(
   sequence_expand_op.cu
   sequence_mask_op.cu
   sequence_pad_op.cu
-  sequence_pool_op.cu
   sequence_expand_as_op.cu
   sequence_reshape_op.cu
   sequence_reverse_op.cu
26 changes: 26 additions & 0 deletions paddle/phi/kernels/cpu/sequence_pool_grad_kernel.cc
@@ -0,0 +1,26 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/sequence_pool_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h"

PD_REGISTER_KERNEL(sequence_pool_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::SequencePoolGradKernel,
                   float,
                   double) {}
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -11,8 +11,12 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/fluid/operators/sequence_ops/sequence_pool_op.h"

-namespace ops = paddle::operators;
-PD_REGISTER_STRUCT_KERNEL(
-    sequence_pool_grad, GPU, ALL_LAYOUT, ops::SequencePoolGradKernel, float) {}
+#include "paddle/phi/kernels/sequence_pool_grad_kernel.h"
+#include "paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h"
+
+#include "paddle/phi/backends/gpu/gpu_context.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(
+    sequence_pool_grad, GPU, ALL_LAYOUT, phi::SequencePoolGradKernel, float) {}
39 changes: 39 additions & 0 deletions paddle/phi/kernels/impl/sequence_pool_grad_kernel_impl.h
@@ -0,0 +1,39 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/kernels/funcs/sequence_pooling.h"

namespace phi {

template <typename T, typename Context>
void SequencePoolGradKernel(const Context& dev_ctx,
                            const DenseTensor& x,
                            const paddle::optional<DenseTensor>& max_index,
                            const DenseTensor& out_grad,
                            bool is_test,
                            const std::string& pooltype,
                            float pad_value,
                            DenseTensor* x_grad) {
  const phi::DenseTensor* index = nullptr;
  if (pooltype == "MAX") {
    index = max_index.get_ptr();
  }
  dev_ctx.template Alloc<T>(x_grad);
  phi::funcs::SequencePoolGradFunctor<Context, T> pool;
  pool(dev_ctx, pooltype, out_grad, x_grad, index);
}

} // namespace phi
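
Only the MAX path consumes MaxIndex: the forward pass records which input row produced each output element, and the backward pass routes the corresponding output gradient to exactly that row. A rough sketch of that scatter, with hypothetical names and not the actual phi::funcs::SequencePoolGradFunctor:

```cpp
#include <cstdint>
#include <vector>

// Illustrative MAX-pooling gradient scatter. Assumes max_index[s * width + w]
// stores the absolute input row chosen for sequence s, column w during the
// forward pass; every other row receives zero gradient.
void MaxPoolGradScatter(size_t num_seqs, size_t num_rows, int width,
                        const std::vector<int32_t>& max_index,
                        const std::vector<float>& out_grad,
                        std::vector<float>* x_grad) {
  x_grad->assign(num_rows * width, 0.0f);
  for (size_t s = 0; s < num_seqs; ++s) {
    for (int w = 0; w < width; ++w) {
      const int32_t row = max_index[s * width + w];  // argmax recorded forward
      (*x_grad)[row * width + w] = out_grad[s * width + w];
    }
  }
}
```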
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/sequence_pool_kernel_impl.h
@@ -68,7 +68,7 @@ void SequencePoolKernel(const Context& ctx,
       (is_test == false || (ctx.GetPlace() == phi::CPUPlace()) == false)) {
     index = max_index;
     index->Resize({dims});
-    ctx.template Alloc<int>(index);
+    ctx.template Alloc<int32_t>(index);
   }
   phi::funcs::SequencePoolFunctor<Context, T> pool;
   pool(ctx, pooltype, pad_value_, x, out, is_test, index);
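
This makes the MaxIndex element type an explicit fixed-width int32_t rather than the platform int alias, matching the "[phi] modify out max_index int32_t" commit message above.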
30 changes: 30 additions & 0 deletions paddle/phi/kernels/sequence_pool_grad_kernel.h
@@ -0,0 +1,30 @@
/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {
template <typename T, typename Context>
void SequencePoolGradKernel(const Context& dev_ctx,
                            const DenseTensor& x,
                            const paddle::optional<DenseTensor>& max_index,
                            const DenseTensor& out_grad,
                            bool is_test,
                            const std::string& pooltype,
                            float pad_value,
                            DenseTensor* x_grad);

} // namespace phi
10 changes: 10 additions & 0 deletions paddle/phi/ops/compat/sequence_pool_sig.cc
@@ -21,6 +21,16 @@ KernelSignature SequencePoolOpArgumentMapping(
                          {"Out", "MaxIndex"});
 }

+KernelSignature SequencePoolGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("sequence_pool_grad",
+                         {"X", "MaxIndex", "Out@GRAD"},
+                         {"is_test", "pooltype", "pad_value"},
+                         {"X@GRAD"});
+}
+
 }  // namespace phi

 PD_REGISTER_ARG_MAPPING_FN(sequence_pool, phi::SequencePoolOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(sequence_pool_grad,
+                           phi::SequencePoolGradOpArgumentMapping);
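
The positional lists in this signature bind one-to-one to the new phi kernel: X, MaxIndex and Out@GRAD map to x, max_index and out_grad, the three attributes to is_test, pooltype and pad_value, and X@GRAD to x_grad. MaxIndex feeds the paddle::optional<DenseTensor> parameter, which the kernel only dereferences when pooltype is "MAX", in line with the "fix max_index optional" commit above.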
