Gelu.cpp (forked from pytorch/pytorch)
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <ATen/native/Activation.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/gelu_native.h>
#include <ATen/ops/gelu_backward_native.h>
#endif

#if !AT_MKLDNN_ENABLED()

namespace at { namespace native {

// Stubs used when ATen is built without MKLDNN support: calling them always errors out.
Tensor mkldnn_gelu(const Tensor& input, std::string_view approximate) {
  TORCH_CHECK(false, "mkldnn_gelu: ATen not compiled with MKLDNN support");
}

Tensor mkldnn_gelu_backward(const Tensor& grad_output, const Tensor& input, std::string_view approximate) {
  TORCH_CHECK(false, "mkldnn_gelu_backward: ATen not compiled with MKLDNN support");
}

}}

#else // AT_MKLDNN_ENABLED

#include <ATen/native/mkldnn/MKLDNNCommon.h>
#include <ATen/native/mkldnn/Utils.h>

namespace at::native {

Tensor mkldnn_gelu(const Tensor& input, std::string_view approximate) {
  // The bf16 path requires AVX512 support on the CPU.
  if (input.scalar_type() == ScalarType::BFloat16) {
    TORCH_CHECK(mkldnn_bf16_device_check(),
        "mkldnn_gelu: bf16 path needs the cpu support avx512bw, avx512vl and avx512dq");
  }
  // Only the exact (erf-based) GELU is supported; the tanh approximation is rejected.
  TORCH_CHECK(get_gelutype_enum(approximate) == GeluType::None,
      "mkldnn_gelu: fast, approximate gelu is not supported");
  const ideep::tensor& x = itensor_from_tensor(input);
  ideep::tensor y;
  ideep::eltwise_forward::compute(
      x, y, ideep::algorithm::eltwise_gelu_erf, ideep::prop_kind::forward_training, /*alpha*/ 0.0);
  return new_with_itensor_mkldnn(std::move(y), optTypeMetaToScalarType(input.options().dtype_opt()),
                                 input.options().device_opt());
}

Tensor mkldnn_gelu_backward(const Tensor& grad_output, const Tensor& input, std::string_view approximate) {
  TORCH_CHECK(get_gelutype_enum(approximate) == GeluType::None,
      "mkldnn_gelu_backward: fast, approximate gelu is not supported");
  const ideep::tensor& x = itensor_from_tensor(input);
  ideep::tensor grady = itensor_from_tensor(grad_output);
  ideep::tensor gradx;
  ideep::eltwise_backward::compute(x, grady, gradx,
      ideep::algorithm::eltwise_gelu_erf, /*alpha*/ 0.0);
  return new_with_itensor_mkldnn(std::move(gradx),
                                 optTypeMetaToScalarType(grad_output.options().dtype_opt()),
                                 grad_output.options().device_opt());
}

} // namespace at::native

#endif // AT_MKLDNN_ENABLED
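
// Usage sketch (not part of this file, shown as an assumption for context):
// mkldnn_gelu is not called directly; it is reached through the dispatcher when
// gelu runs on a tensor in the MKLDNN (opaque) layout, roughly like this:
//
//   #include <ATen/ATen.h>
//
//   at::Tensor x  = at::randn({8, 16});   // dense CPU float tensor
//   at::Tensor xm = x.to_mkldnn();        // convert to the MKLDNN layout
//   at::Tensor ym = at::gelu(xm);         // dispatches to mkldnn_gelu (approximate defaults to "none")
//   at::Tensor y  = ym.to_dense();        // convert back to a strided tensor
//
// Requesting the tanh approximation (approximate="tanh") would trip the
// TORCH_CHECK above, since only the exact erf-based GELU has an MKLDNN path here.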