From e83c0111297d76daa5885f1da2aab8434ca1fa0a Mon Sep 17 00:00:00 2001
From: yuchen202 <103028470+yuchen202@users.noreply.github.com>
Date: Thu, 28 Sep 2023 23:45:30 +0800
Subject: [PATCH] 0928

---
 python/paddle/distribution/bernoulli.py   |  8 ++++++
 python/paddle/distribution/categorical.py |  8 +++++-
 python/paddle/distribution/gumbel.py      |  5 ++--
 python/paddle/hapi/dynamic_flops.py       |  6 ++--
 python/paddle/hapi/model.py               | 34 +++++++++++------------
 python/paddle/hapi/model_summary.py       |  2 +-
 6 files changed, 39 insertions(+), 24 deletions(-)

diff --git a/python/paddle/distribution/bernoulli.py b/python/paddle/distribution/bernoulli.py
index 7d4849fab48e7..4048641a5c827 100644
--- a/python/paddle/distribution/bernoulli.py
+++ b/python/paddle/distribution/bernoulli.py
@@ -72,6 +72,7 @@ class Bernoulli(exponential_family.ExponentialFamily):
 
         .. code-block:: python
 
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
             >>> import paddle
             >>> from paddle.distribution import Bernoulli
 
@@ -156,6 +157,7 @@ def sample(self, shape):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -211,6 +213,7 @@ def rsample(self, shape, temperature=1.0):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -308,6 +311,7 @@ def cdf(self, value):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -346,6 +350,7 @@ def log_prob(self, value):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -385,6 +390,7 @@ def prob(self, value):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -415,6 +421,7 @@ def entropy(self):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
 
@@ -448,6 +455,7 @@ def kl_divergence(self, other):
 
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Bernoulli
diff --git a/python/paddle/distribution/categorical.py b/python/paddle/distribution/categorical.py
index b6484e3f21d56..c7be49e50b471 100644
--- a/python/paddle/distribution/categorical.py
+++ b/python/paddle/distribution/categorical.py
@@ -46,6 +46,7 @@ class Categorical(distribution.Distribution):
     Examples:
         .. code-block:: python
 
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
             >>> import paddle
             >>> from paddle.distribution import Categorical
 
@@ -64,7 +65,7 @@ class Categorical(distribution.Distribution):
             >>> cat = Categorical(x)
             >>> cat2 = Categorical(y)
 
-            >>> # doctest: +SKIP
+            >>> # doctest: +SKIP('`paddle.distribution` can not run in xdoctest')
             >>> paddle.seed(1000) # on CPU device
             >>> print(cat.sample([2,3]))
             Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
@@ -134,6 +135,7 @@ def sample(self, shape):
         Examples:
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Categorical
 
@@ -191,6 +193,7 @@ def kl_divergence(self, other):
         Examples:
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Categorical
 
@@ -245,6 +248,7 @@ def entropy(self):
         Examples:
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Categorical
 
@@ -289,6 +293,7 @@ def probs(self, value):
         Examples:
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Categorical
 
@@ -336,6 +341,7 @@ def log_prob(self, value):
         Examples:
             .. code-block:: python
 
+                >>> # doctest: +REQUIRES(env:DISTRIBUTED)
                 >>> import paddle
                 >>> from paddle.distribution import Categorical
diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py
index 005801ae6b7cc..e42edff8bd446 100644
--- a/python/paddle/distribution/gumbel.py
+++ b/python/paddle/distribution/gumbel.py
@@ -46,13 +46,14 @@ class Gumbel(TransformedDistribution):
     Examples:
         .. code-block:: python
 
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
             >>> import paddle
             >>> from paddle.distribution.gumbel import Gumbel
 
             >>> # Gumbel distributed with loc=0, scale=1
             >>> dist = Gumbel(paddle.full([1], 0.0), paddle.full([1], 1.0))
 
-            >>> # doctest: +SKIP
+            >>> # doctest: +SKIP('`paddle.distribution` can not run in xdoctest')
             >>> print(dist.sample([2]))
             Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
             [[0.40484068],
@@ -63,7 +64,7 @@ class Gumbel(TransformedDistribution):
             [[-0.95093185],
             [ 0.32422572]])
 
-            >>> # doctest: -SKIP
+            >>> # doctest: -SKIP('`paddle.distribution` can not run in xdoctest')
             >>> value = paddle.full([1], 0.5)
             >>> print(dist.prob(value))
             Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
diff --git a/python/paddle/hapi/dynamic_flops.py b/python/paddle/hapi/dynamic_flops.py
index fcae6e4120ac8..89bad05438b32 100644
--- a/python/paddle/hapi/dynamic_flops.py
+++ b/python/paddle/hapi/dynamic_flops.py
@@ -59,7 +59,7 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
-            ...
+
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
@@ -73,14 +73,14 @@ def flops(net, input_size, custom_ops=None, print_detail=False):
            ...         x = paddle.flatten(x, 1)
            ...         x = self.fc(x)
            ...         return x
-            ...
+
            >>> lenet = LeNet()
            >>> # m is the instance of nn.Layer, x is the intput of layer, y is the output of layer.
            >>> def count_leaky_relu(m, x, y):
            ...     x = x[0]
            ...     nelements = x.numel()
            ...     m.total_ops += int(nelements)
-            ...
+
            >>> FLOPs = paddle.flops(lenet,
            ...                      [1, 1, 28, 28],
            ...                      custom_ops= {nn.LeakyReLU: count_leaky_relu},
diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 8ca5712a3036c..524637a105edb 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1140,7 +1140,7 @@ class Model:
             ...
             ...     model = paddle.Model(net)
             ...     optim = paddle.optimizer.SGD(learning_rate=1e-3, parameters=model.parameters())
-            ...
+
             ...     amp_configs = {
             ...         "level": "O1",
             ...         "custom_white_list": {'conv2d'},
@@ -1150,7 +1150,7 @@
             ...         paddle.nn.CrossEntropyLoss(),
             ...         paddle.metric.Accuracy(),
             ...         amp_configs=amp_configs)
-            ...
+
             ...     transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
             ...     data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
             ...     model.fit(data, epochs=2, batch_size=32, verbose=1)
@@ -1158,7 +1158,7 @@
             >>> # mixed precision training is only supported on GPU now.
             >>> if paddle.is_compiled_with_cuda():
             ...     run_example_code()
-            ...
+
     """
 
     def __init__(self, network, inputs=None, labels=None):
@@ -1228,7 +1228,7 @@ def train_batch(self, inputs, labels=None, update=True):
                 ...     nn.Linear(784, 200),
                 ...     nn.Tanh(),
                 ...     nn.Linear(200, 10))
-                ...
+
                 >>> input = InputSpec([None, 784], 'float32', 'x')
                 >>> label = InputSpec([None, 1], 'int64', 'label')
                 >>> model = paddle.Model(net, input, label)
@@ -1282,7 +1282,7 @@ def eval_batch(self, inputs, labels=None):
                 ...     nn.Linear(784, 200),
                 ...     nn.Tanh(),
                 ...     nn.Linear(200, 10))
-                ...
+
                 >>> input = InputSpec([None, 784], 'float32', 'x')
                 >>> label = InputSpec([None, 1], 'int64', 'label')
                 >>> model = paddle.Model(net, input, label)
@@ -1337,7 +1337,7 @@ def predict_batch(self, inputs):
                 ...     nn.Tanh(),
                 ...     nn.Linear(200, 10),
                 ...     nn.Softmax())
-                ...
+
                 >>> model = paddle.Model(net, input, label)
                 >>> model.prepare()
                 >>> data = paddle.rand((1, 784), dtype="float32")
@@ -1467,7 +1467,7 @@ def load(self, path, skip_mismatch=False, reset_optimizer=False):
                 ...     nn.Tanh(),
                 ...     nn.Linear(200, 10),
                 ...     nn.Softmax()), input)
-                ...
+
                 >>> model.save('checkpoint/test')
                 >>> model.load('checkpoint/test')
 
@@ -1558,7 +1558,7 @@ def parameters(self, *args, **kwargs):
                 ...     nn.Linear(784, 200),
                 ...     nn.Tanh(),
                 ...     nn.Linear(200, 10)), input)
-                ...
+
                 >>> params = model.parameters()
                 >>> print(params)
                 [Parameter containing:
@@ -1835,7 +1835,7 @@ def fit(
                 >>> dynamic = True
                 >>> if not dynamic:
                 ...     paddle.enable_static()
-                ...
+
                 >>> transform = T.Compose([T.Transpose(),
                 ...                        T.Normalize([127.5], [127.5])])
                 >>> train_dataset = MNIST(mode='train', transform=transform)
@@ -1858,7 +1858,7 @@ def fit(
                 ...           epochs=2,
                 ...           batch_size=64,
                 ...           save_dir='mnist_checkpoint')
-                ...
+
 
         2. An example use DataLoader, batch size and shuffle is set in DataLoader.
 
@@ -1873,7 +1873,7 @@ def fit(
                 >>> dynamic = True
                 >>> if not dynamic:
                 ...     paddle.enable_static()
-                ...
+
                 >>> transform = T.Compose([T.Transpose(),
                 ...                        T.Normalize([127.5], [127.5])])
                 >>> train_dataset = MNIST(mode='train', transform=transform)
@@ -1882,7 +1882,7 @@ def fit(
                 >>> val_dataset = MNIST(mode='test', transform=transform)
                 >>> val_loader = paddle.io.DataLoader(val_dataset,
                 ...                                   batch_size=64)
-                ...
+
 
                 >>> input = InputSpec([None, 1, 28, 28], 'float32', 'image')
                 >>> label = InputSpec([None, 1], 'int64', 'label')
@@ -1898,7 +1898,7 @@ def fit(
                 ...           val_loader,
                 ...           epochs=2,
                 ...           save_dir='mnist_checkpoint')
-                ...
+
 
         """
         assert train_data is not None, "train_data must be given!"
@@ -2154,16 +2154,16 @@ def predict(
                 ...     def __init__(self, mode, return_label=True):
                 ...         super().__init__(mode=mode)
                 ...         self.return_label = return_label
-                ...
+
                 ...     def __getitem__(self, idx):
                 ...         img = np.reshape(self.images[idx], [1, 28, 28])
                 ...         if self.return_label:
                 ...             return img, np.array(self.labels[idx]).astype('int64')
                 ...         return img
-                ...
+
                 ...     def __len__(self):
                 ...         return len(self.images)
-                ...
+
                 >>> test_dataset = MnistDataset(mode='test', return_label=False)
 
                 >>> # imperative mode
@@ -2399,7 +2399,7 @@ def summary(self, input_size=None, dtype=None):
            >>> optim = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
            >>> model.prepare(optim, paddle.nn.CrossEntropyLoss())
            >>> params_info = model.summary()
-            >>> # doctest: +SKIP
+            >>> # doctest: +SKIP('`paddle.static` can not run in xdoctest')
            >>> print(params_info)
            ---------------------------------------------------------------------------
             Layer (type)       Input Shape          Output Shape         Param #
diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py
index df5791a5fd70d..99cf123aaff34 100644
--- a/python/paddle/hapi/model_summary.py
+++ b/python/paddle/hapi/model_summary.py
@@ -60,7 +60,7 @@ def summary(net, input_size=None, dtypes=None, input=None):
            ...             nn.Conv2D(6, 16, 5, stride=1, padding=0),
            ...             nn.ReLU(),
            ...             nn.MaxPool2D(2, 2))
-            ...
+
            ...         if num_classes > 0:
            ...             self.fc = nn.Sequential(
            ...                 nn.Linear(400, 120),
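Note on the directives this patch standardizes (a sketch, not part of the patch itself): the skip reasons quoted above name xdoctest, the runner behind Paddle's sample-code checks. Under xdoctest, `# doctest: +REQUIRES(env:DISTRIBUTED)` collects an example only when the named environment variable is set to a truthy value, while `# doctest: +SKIP('reason')` disables the example and keeps the quoted reason purely for readers. The `-`/`+` pairs in the hapi files are a separate cleanup that replaces leftover bare `...` continuation lines with blank lines. A minimal standalone illustration of the directive mechanics (the file `demo.py`, the functions `double` and `machine_id`, and the flag `RUN_EXPENSIVE` are invented for this sketch, which assumes the `xdoctest` package is installed):

    # demo.py -- illustrative sketch only; assumes `pip install xdoctest`.

    def double(x):
        """Return ``x`` doubled.

        Example:
            >>> # doctest: +REQUIRES(env:RUN_EXPENSIVE)
            >>> double(21)   # executed only when RUN_EXPENSIVE is set truthy
            42
        """
        return 2 * x

    def machine_id():
        """Return a host-specific string.

        Example:
            >>> # doctest: +SKIP('output is machine-dependent')
            >>> machine_id()
            'host-1234'
        """
        import platform
        return platform.node()

    if __name__ == "__main__":
        import xdoctest
        # `python demo.py` skips both examples (one unmet requirement, one
        # explicit skip); `RUN_EXPENSIVE=1 python demo.py` runs the first.
        xdoctest.doctest_module(__file__)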