[pir] add unittest for pir test of Dropout (#58106)
* tmp

* modify unsqueeze

* modify part complex bug

* modify

* modify cpu

* [PIR] Migrate maximum into pir

* Polish code

* add ir_grad of static_gradient

* add test

* modify bug

* modify

* test_with_pir

* close one test

* add_math_op_patch

* modify

* modify

* add mean fill_constant test

* modify cpu int32 test

* get_shape_tensor

* delete

* add default place

* add dropout unittest

* Update test/legacy_test/test_elementwise_div_op.py

* modify review comment

* modify add test

---------

Co-authored-by: 0x45f <[email protected]>
xiaoguoguo626807 and 0x45f authored Oct 17, 2023
1 parent fb0f9c5 commit a08db48
Showing 7 changed files with 173 additions and 77 deletions.
python/paddle/nn/functional/common.py (2 additions & 2 deletions)

@@ -1196,7 +1196,7 @@ def get_attrs(prog, dropout_prob, is_test, seed):

         # get mask shape
         input_shape = x.shape
-        if not in_dynamic_or_pir_mode():
+        if not in_dynamic_mode():
             input_shape_tensor = paddle.shape(x)
         drop_axes = [axis] if isinstance(axis, int) else list(axis)
         if min(drop_axes) < 0 or max(drop_axes) > len(input_shape) - 1:
@@ -1212,7 +1212,7 @@ def get_attrs(prog, dropout_prob, is_test, seed):
                 )
             )
         mask_shape = [1] * len(input_shape)
-        if not in_dynamic_or_pir_mode():
+        if not in_dynamic_mode():
             for i in drop_axes:
                 mask_shape[i] = input_shape_tensor[i]
         else:
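
For context (not part of the diff): with `axis` set, dropout builds a broadcastable mask, and after this change a PIR static graph takes the same `paddle.shape(x)` branch as the legacy static graph, so the mask shape can depend on runtime dimensions. A minimal sketch of a program that exercises this path:

import paddle
import paddle.nn.functional as F

paddle.enable_static()
main, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main, startup):
    # The batch dimension is unknown at build time (-1), so the dropout mask
    # shape must be read from a shape tensor rather than from x.shape.
    x = paddle.static.data(name='x', shape=[-1, 8], dtype='float32')
    y = F.dropout(x, p=0.5, axis=1, training=True)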
python/paddle/pir/math_op_patch.py (87 additions & 39 deletions)

@@ -153,6 +153,56 @@ def _item(self):
         )
         return self

+    def astype(self, dtype):
+        """
+        **Notes**:
+            Cast an OpResult to a specified data type.
+        Args:
+            self(OpResult): The source OpResult
+            dtype: The target data type
+        Returns:
+            OpResult: OpResult with new dtype
+        Examples:
+            In Static Graph Mode:
+            .. code-block:: python
+                >>> import paddle
+                >>> paddle.enable_static()
+                >>> startup_prog = paddle.static.Program()
+                >>> main_prog = paddle.static.Program()
+                >>> with paddle.static.program_guard(startup_prog, main_prog):
+                ...     original_value = paddle.static.data(name="new_value", shape=[2, 2], dtype='float32')
+                ...     new_value = original_value.astype('int64')
+                ...     print("new value's dtype is: {}".format(new_value.dtype))
+                ...
+                new value's dtype is: paddle.int64
+        """
+        from paddle import _C_ops
+
+        if not isinstance(dtype, DataType):
+            dtype = paddle.pir.core.convert_np_dtype_to_dtype_(dtype)
+        return _C_ops.cast(self, dtype)
+
+    def _scalar_add_(var, value):
+        return paddle.scale(var, 1.0, value)
+
+    def _scalar_sub_(var, value):
+        return paddle.scale(var, 1.0, -value)
+
+    def _scalar_rsub_(var, value):
+        return paddle.scale(var, -1.0, value)
+
+    def _scalar_mul_(var, value):
+        return paddle.scale(var, value, 0.0)
+
+    def _scalar_div_(var, value):
+        return paddle.scale(var, 1.0 / value, 0.0)
+
@@ -168,7 +218,7 @@ def __impl__(self, other_var):
             if isinstance(other_var, float):
                 # in all cases(+, -, *, /, **, //, %), we need cast tensor.dtype to float
                 if self.dtype in _supported_int_dtype_:
-                    paddle.cast(self, DataType.FLOAT32)
+                    self = astype(self, DataType.FLOAT32)
                 # here use `scale` replace `elementwise` to get better performance
                 # but only +, -, *, / can use this method
                 if scalar_method is not None:
@@ -253,44 +303,6 @@ def __impl__(self, other_var):
         __impl__.__name__ = method_name
         return __impl__

-    def astype(self, dtype):
-        """
-        **Notes**:
-            Cast an OpResult to a specified data type.
-        Args:
-            self(OpResult): The source OpResult
-            dtype: The target data type
-        Returns:
-            OpResult: OpResult with new dtype
-        Examples:
-            In Static Graph Mode:
-            .. code-block:: python
-                >>> import paddle
-                >>> paddle.enable_static()
-                >>> startup_prog = paddle.static.Program()
-                >>> main_prog = paddle.static.Program()
-                >>> with paddle.static.program_guard(startup_prog, main_prog):
-                ...     original_value = paddle.static.data(name="new_value", shape=[2, 2], dtype='float32')
-                ...     new_value = original_value.astype('int64')
-                ...     print("new value's dtype is: {}".format(new_value.dtype))
-                ...
-                new value's dtype is: paddle.int64
-        """
-        from paddle import _C_ops
-
-        if not isinstance(dtype, DataType):
-            dtype = paddle.pir.core.convert_np_dtype_to_dtype_(dtype)
-        return _C_ops.cast(self, dtype)
-
     import paddle

     opresult_methods = [
@@ -300,6 +312,42 @@ def astype(self, dtype):
         ('ndimension', ndimension),
         ('ndim', _ndim),
         ('astype', astype),
+        (
+            '__add__',
+            _binary_creator_('__add__', paddle.tensor.add, False, _scalar_add_),
+        ),
+        #  a+b == b+a. Do not need to reverse explicitly
+        (
+            '__radd__',
+            _binary_creator_(
+                '__radd__', paddle.tensor.add, False, _scalar_add_
+            ),
+        ),
+        (
+            '__sub__',
+            _binary_creator_(
+                '__sub__', paddle.tensor.subtract, False, _scalar_sub_
+            ),
+        ),
+        (
+            '__rsub__',
+            _binary_creator_(
+                '__rsub__', paddle.tensor.subtract, True, _scalar_rsub_
+            ),
+        ),
+        (
+            '__mul__',
+            _binary_creator_(
+                '__mul__', paddle.tensor.multiply, False, _scalar_mul_
+            ),
+        ),
+        #  a*b == b*a. Do not need to reverse explicitly
+        (
+            '__rmul__',
+            _binary_creator_(
+                '__rmul__', paddle.tensor.multiply, False, _scalar_mul_
+            ),
+        ),
+        (
+            '__div__',
+            _binary_creator_(
...
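
Taken together, the patches above let scalar arithmetic on a pir.OpResult lower to a single scale op rather than a full elementwise op, with integer operands first cast to float32 via astype. A hedged sketch of the resulting behavior; using paddle.pir_utils.IrGuard as the switch into PIR program building is an assumption and may differ across Paddle versions:

import paddle

paddle.enable_static()
with paddle.pir_utils.IrGuard():  # assumption: builds programs under PIR
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data(name='x', shape=[2, 2], dtype='int32')
        # __add__ with a float scalar: x is cast to float32 via astype, then
        # the add lowers to paddle.scale(x, scale=1.0, bias=1.5)
        y = x + 1.5
        # __rmul__: 2.0 * x lowers to paddle.scale(x, scale=2.0, bias=0.0)
        z = 2.0 * x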
python/paddle/tensor/random.py (4 additions & 0 deletions)

@@ -796,6 +796,10 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):

     if in_dynamic_or_pir_mode():
         shape = paddle.utils.convert_shape_to_list(shape)
+        if in_pir_mode() and paddle.utils._contain_var(shape):
+            shape = paddle.utils.get_pir_shape_tensor(
+                shape, _current_expected_place()
+            )
         return _C_ops.uniform(
             shape,
             dtype,
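
The new branch is what lets a shape list that mixes plain ints with shape Tensors reach `_C_ops.uniform` while building a PIR program: the list is first normalized into 1-D int32 pieces on the target place. An illustrative use, not taken from the commit:

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')
    batch = paddle.shape(x)[0]  # a runtime-dependent dimension (a Tensor)
    # A shape list containing a Tensor; under PIR this now goes through
    # paddle.utils.get_pir_shape_tensor before the C++ op is called.
    noise = paddle.uniform(shape=[batch, 4], dtype='float32')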
python/paddle/utils/layers_utils.py (2 additions & 0 deletions)

@@ -393,6 +393,8 @@ def get_pir_shape_tensor(list_shape, place=_current_expected_place()):
             dim.stop_gradient = True
             if convert_dtype(dim.dtype) != 'int32':
                 dim = paddle.cast(x=dim, dtype='int32')
+            if dim.shape == []:
+                dim = paddle.reshape(dim, [-1])
             shape_tensor_list.append(dim)
         else:
             temp_out = paddle.full([1], dim, core.DataType.INT32, place)
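
The added reshape normalizes 0-D (scalar) tensors, such as the result of indexing paddle.shape(x), into the 1-D pieces the downstream shape assembly expects. A minimal dynamic-mode sketch of just that normalization:

import paddle

dim = paddle.to_tensor(3, dtype='int32')  # 0-D tensor: dim.shape == []
if dim.shape == []:
    dim = paddle.reshape(dim, [-1])       # 1-D tensor: dim.shape == [1]
print(dim.shape)  # [1]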
test/legacy_test/test_activation_op.py (1 addition & 0 deletions)

@@ -2672,6 +2672,7 @@ def setUp(self):
         self.rev_comp_rtol = 1e-8
         self.rev_comp_atol = 1e-8

+    @test_with_pir_api
     def test_static_api(self):
         with static_guard():
             with paddle.static.program_guard(paddle.static.Program()):
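
For readers unfamiliar with the decorator: test_with_pir_api re-runs a static-graph test under PIR in addition to the legacy static graph. A hedged sketch of the general shape of such a decorator; the name with_pir_api below and the IrGuard mechanism are illustrative assumptions, not Paddle's exact implementation:

import functools
import paddle

def with_pir_api(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        fn(*args, **kwargs)               # first pass: legacy static graph
        with paddle.pir_utils.IrGuard():  # assumption: switches building to PIR
            fn(*args, **kwargs)           # second pass: PIR pipeline
    return wrapper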
[Diffs for the remaining 2 of the 7 changed files are not shown.]
