class Test_Log1p_Op_Fp16(unittest.TestCase):
    """Static-graph float16 coverage for ``paddle.log1p`` on CUDA.

    ``log1p`` only provides a float16 kernel on GPU, so the program is run
    exclusively when the wheel was compiled with CUDA; on CPU-only builds
    the test is a silent no-op (graph construction is still exercised).
    """

    def test_api_fp16(self):
        # numpy is only needed for the reference value; the enclosing file
        # does not import it at module level, so bring it in locally.
        import numpy as np

        paddle.enable_static()
        with static.program_guard(static.Program(), static.Program()):
            x_np = np.array([[2, 3, 4], [7, 8, 9]], dtype='float16')
            x = paddle.to_tensor(x_np, dtype='float16')
            out = paddle.log1p(x)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = static.Executor(place)
                (res,) = exe.run(fetch_list=[out])
                # The original test only checked that execution did not
                # raise; also verify the numerical result against numpy.
                # fp16 has ~3 decimal digits of precision, hence rtol=1e-3.
                np.testing.assert_allclose(
                    res, np.log1p(x_np), rtol=1e-3
                )
class Test_Log_Op_Fp16(unittest.TestCase):
    """Static-graph float16 coverage for ``paddle.log`` on CUDA.

    ``log`` only provides a float16 kernel on GPU, so the program is run
    exclusively when the wheel was compiled with CUDA; on CPU-only builds
    the test is a silent no-op (graph construction is still exercised).
    """

    def test_api_fp16(self):
        # numpy is only needed for the reference value; the enclosing file
        # does not import it at module level, so bring it in locally.
        import numpy as np

        paddle.enable_static()
        with static.program_guard(static.Program(), static.Program()):
            x_np = np.array([[2, 3, 4], [7, 8, 9]], dtype='float16')
            x = paddle.to_tensor(x_np, dtype='float16')
            out = paddle.log(x)
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
                exe = static.Executor(place)
                (res,) = exe.run(fetch_list=[out])
                # The original test only checked that execution did not
                # raise; also verify the numerical result against numpy.
                # fp16 has ~3 decimal digits of precision, hence rtol=1e-3.
                np.testing.assert_allclose(
                    res, np.log(x_np), rtol=1e-3
                )
For more information, please refer to :ref:`api_guide_Name` @@ -156,7 +156,9 @@ def log(x, name=None): if in_dygraph_mode(): return _C_ops.log(x) else: - check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log") + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], "log" + ) inputs = {'X': [x]} helper = LayerHelper('log', **locals()) dtype = helper.input_dtype(input_param_name='x') @@ -2127,7 +2129,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None): logsumexp(x) = \log\sum exp(x) Args: - x (Tensor): The input Tensor with data type float32 or float64, which + x (Tensor): The input Tensor with data type float16, float32 or float64, which have no more than 4 dimensions. axis (int|list|tuple, optional): The axis along which to perform logsumexp calculations. ``axis`` should be int, list(int) or @@ -2166,7 +2168,9 @@ def logsumexp(x, axis=None, keepdim=False, name=None): if in_dygraph_mode(): return _C_ops.logsumexp(x, axis, keepdim, reduce_all) else: - check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], 'logsumexp' + ) helper = LayerHelper('logsumexp', **locals()) attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all} @@ -2648,7 +2652,7 @@ def log1p(x, name=None): Out = \ln(x+1) Args: - x (Tensor): Input Tensor. Must be one of the following types: float32, float64. + x (Tensor): Input Tensor. Must be one of the following types: float16, float32, float64. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. 
Returns: @@ -2667,7 +2671,9 @@ def log1p(x, name=None): if in_dygraph_mode(): return _C_ops.log1p(x) else: - check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p") + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64'], "log1p" + ) inputs = {'X': [x]} helper = LayerHelper('log1p', **locals()) dtype = helper.input_dtype(input_param_name='x')