[xdoctest][task 314] reformat example code with google style in python/paddle/sparse/nn/layer/activation.py (#57132)

* [Doctest]fix No.314, test=docs_preview

* [Doctest]fix No.314, test=docs_preview
yoyoIcy authored Sep 11, 2023
1 parent b9f45f0 commit 0aad84e
Showing 1 changed file with 63 additions and 62 deletions.
125 changes: 63 additions & 62 deletions python/paddle/sparse/nn/layer/activation.py
@@ -39,14 +39,16 @@ class ReLU(Layer):
Examples:
.. code-block:: python
- import paddle
- dense_x = paddle.to_tensor([-2., 0., 1.])
- sparse_x = dense_x.to_sparse_coo(1)
- relu = paddle.sparse.nn.ReLU()
- out = relu(sparse_x)
- # [0., 0., 1.]
+ >>> import paddle
+ >>> dense_x = paddle.to_tensor([-2., 0., 1.])
+ >>> sparse_x = dense_x.to_sparse_coo(1)
+ >>> relu = paddle.sparse.nn.ReLU()
+ >>> out = relu(sparse_x)
+ >>> print(out)
+ Tensor(shape=[3], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
+        indices=[[0, 2]],
+        values=[0., 1.])
"""

def __init__(self, name=None):
@@ -89,49 +91,49 @@ class Softmax(Layer):
Examples:
.. code-block:: python
- import paddle
- paddle.seed(2022)
- mask = paddle.rand((3, 4)) < 0.7
- x = paddle.rand((3, 4)) * mask
- print(x)
- # Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
- #        [[0.08325022, 0.27030438, 0.        , 0.83883715],
- #         [0.        , 0.95856029, 0.24004589, 0.        ],
- #         [0.14500992, 0.17088132, 0.        , 0.        ]])
- csr = x.to_sparse_csr()
- print(csr)
- # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
- #        crows=[0, 3, 5, 7],
- #        cols=[0, 1, 3, 1, 2, 0, 1],
- #        values=[0.08325022, 0.27030438, 0.83883715, 0.95856029, 0.24004589,
- #                0.14500992, 0.17088132])
- softmax = paddle.sparse.nn.Softmax()
- out = softmax(csr)
- print(out)
- # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
- #        crows=[0, 3, 5, 7],
- #        cols=[0, 1, 3, 1, 2, 0, 1],
- #        values=[0.23070428, 0.27815846, 0.49113727, 0.67227983, 0.32772022,
- #                0.49353254, 0.50646752])
- coo = x.to_sparse_coo(sparse_dim=2)
- print(coo)
- # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
- #        indices=[[0, 0, 0, 1, 1, 1, 2, 2, 2, 2],
- #                 [0, 1, 3, 0, 2, 3, 0, 1, 2, 3]],
- #        values=[0.83438963, 0.70008713, 0.88831252, 0.02200012, 0.75432241,
- #                0.65136462, 0.96088767, 0.82938021, 0.35367414, 0.86653489])
- out = softmax(coo)
- print(out)
- # Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(gpu:0), stop_gradient=True,
- #        indices=[[0, 0, 0, 1, 1, 1, 2, 2, 2, 2],
- #                 [0, 1, 3, 0, 2, 3, 0, 1, 2, 3]],
- #        values=[0.34132853, 0.29843226, 0.36023924, 0.20176250, 0.41964683,
- #                0.37859073, 0.30015597, 0.26316857, 0.16354507, 0.27313042])
+ >>> import paddle
+ >>> paddle.seed(2022)
+ >>> mask = paddle.rand((3, 4)) < 0.7
+ >>> x = paddle.rand((3, 4)) * mask
+ >>> print(x)
+ Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+        [[0.88156885, 0.14463395, 0.17831714, 0.43818203],
+         [0.07617740, 0.75576496, 0.        , 0.61921930],
+         [0.        , 0.        , 0.42460245, 0.03001321]])
+ >>> csr = x.to_sparse_csr()
+ >>> print(csr)
+ Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
+        crows=[0, 4, 7, 9],
+        cols=[0, 1, 2, 3, 0, 1, 3, 2, 3],
+        values=[0.88156885, 0.14463395, 0.17831714, 0.43818203, 0.07617740,
+                0.75576496, 0.61921930, 0.42460245, 0.03001321])
+ >>> softmax = paddle.sparse.nn.Softmax()
+ >>> out = softmax(csr)
+ >>> print(out)
+ Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
+        crows=[0, 4, 7, 9],
+        cols=[0, 1, 2, 3, 0, 1, 3, 2, 3],
+        values=[0.38234913, 0.18298410, 0.18925257, 0.24541418, 0.21302439,
+                0.42031071, 0.36666498, 0.59738696, 0.40261301])
+ >>> coo = x.to_sparse_coo(sparse_dim=2)
+ >>> print(coo)
+ Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
+        indices=[[0, 0, 0, 0, 1, 1, 1, 2, 2],
+                 [0, 1, 2, 3, 0, 1, 3, 2, 3]],
+        values=[0.88156885, 0.14463395, 0.17831714, 0.43818203, 0.07617740,
+                0.75576496, 0.61921930, 0.42460245, 0.03001321])
+ >>> out = softmax(coo)
+ >>> print(out)
+ Tensor(shape=[3, 4], dtype=paddle.float32, place=Place(cpu), stop_gradient=True,
+        indices=[[0, 0, 0, 0, 1, 1, 1, 2, 2],
+                 [0, 1, 2, 3, 0, 1, 3, 2, 3]],
+        values=[0.38234913, 0.18298411, 0.18925257, 0.24541420, 0.21302438,
+                0.42031071, 0.36666498, 0.59738696, 0.40261301])
"""

def __init__(self, axis=-1, name=None):
@@ -167,13 +169,12 @@ class ReLU6(Layer):
Examples:
.. code-block:: python
- import paddle
- dense_x = paddle.to_tensor([-2., 0., 8.])
- sparse_x = dense_x.to_sparse_coo(1)
- relu6 = paddle.sparse.nn.ReLU6()
- out = relu6(sparse_x)
+ >>> import paddle
+ >>> dense_x = paddle.to_tensor([-2., 0., 8.])
+ >>> sparse_x = dense_x.to_sparse_coo(1)
+ >>> relu6 = paddle.sparse.nn.ReLU6()
+ >>> out = relu6(sparse_x)
"""

def __init__(self, name=None):
@@ -216,12 +217,12 @@ class LeakyReLU(Layer):
Examples:
.. code-block:: python
- import paddle
- dense_x = paddle.to_tensor([-2., 0., 5.])
- sparse_x = dense_x.to_sparse_coo(1)
- leaky_relu = paddle.sparse.nn.LeakyReLU(0.5)
- out = leaky_relu(sparse_x)
+ >>> import paddle
+ >>> dense_x = paddle.to_tensor([-2., 0., 5.])
+ >>> sparse_x = dense_x.to_sparse_coo(1)
+ >>> leaky_relu = paddle.sparse.nn.LeakyReLU(0.5)
+ >>> out = leaky_relu(sparse_x)
"""

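For reference (not part of this commit): the converted ReLU6 and LeakyReLU examples above do not print their results, so here is a minimal sketch of what they compute, assuming the paddle.sparse API used in the diff. Tensor.to_dense() and the expected values follow from the activation definitions; they are not output copied from the PR.

    import paddle

    # ReLU6 clamps the stored values to [0, 6]; the non-zero entries -2. and 8.
    # of the sparse COO tensor become 0. and 6.
    dense_x = paddle.to_tensor([-2., 0., 8.])
    out = paddle.sparse.nn.ReLU6()(dense_x.to_sparse_coo(1))
    print(out.to_dense())  # expected: [0., 0., 6.]

    # LeakyReLU(0.5) multiplies negative stored values by 0.5, so -2. becomes -1.
    dense_y = paddle.to_tensor([-2., 0., 5.])
    out = paddle.sparse.nn.LeakyReLU(0.5)(dense_y.to_sparse_coo(1))
    print(out.to_dense())  # expected: [-1., 0., 5.]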
