From fafdd565266d0c44d85699a596c74d79aa631e6d Mon Sep 17 00:00:00 2001
From: LokeZhou
Date: Wed, 5 Jul 2023 12:14:29 +0800
Subject: [PATCH] Upsample (#115)

* add nn.Upsample,CUDAExtension,CppExtension,SequentialSampler,is_sparse
* fix cpp_extension ut
* fix cpp_extension ut
* fix UtilsCppExtensionMatcher ci
* fix UtilsCppExtensionMatcher
* fix cpp_extension ut
* UtilsCppExtensionMatcher bug
* UtilsCppExtensionMatcher bug
* fix UtilsCppExtensionMatcher pop
* fix cpp test
* add cpp test
* add Attribute2Func

---
 paconvert/api_mapping.json                    |  42 ++++++
 paconvert/api_matcher.py                      |   7 +
 paconvert/attribute_mapping.json              |   5 +-
 tests/test_Tensor_is_sparse.py                |  29 ++++
 tests/test_nn_Upsample.py                     | 129 ++++++++++++++++
 .../test_utils_cpp_extension_CUDAExtension.py |  36 +++++
 .../test_utils_cpp_extension_CppExtension.py  |  35 +++++
 tests/test_utils_data_SequentialSampler.py    | 138 ++++++++++++++++++
 8 files changed, 420 insertions(+), 1 deletion(-)
 create mode 100644 tests/test_Tensor_is_sparse.py
 create mode 100644 tests/test_nn_Upsample.py
 create mode 100644 tests/test_utils_cpp_extension_CUDAExtension.py
 create mode 100644 tests/test_utils_cpp_extension_CppExtension.py
 create mode 100644 tests/test_utils_data_SequentialSampler.py

diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json
index b52556792..0d98c93c6 100644
--- a/paconvert/api_mapping.json
+++ b/paconvert/api_mapping.json
@@ -6732,6 +6732,19 @@
             "unflattened_size": "shape"
         }
     },
+    "torch.nn.Upsample": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.nn.Upsample",
+        "args_list": [
+            "size",
+            "scale_factor",
+            "mode",
+            "align_corners"
+        ],
+        "unsupport_args": [
+            "recompute_scale_factor"
+        ]
+    },
     "torch.nn.UpsamplingBilinear2d": {
         "Matcher": "UpsampleMatcher",
         "paddle_api": "paddle.nn.UpsamplingBilinear2D",
@@ -9147,10 +9160,32 @@
         "Matcher": "GenericMatcher",
         "paddle_api": "paddle.utils.cpp_extension.BuildExtension.with_options"
     },
+    "torch.utils.cpp_extension.CUDAExtension": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.utils.cpp_extension.CUDAExtension",
+        "args_list": [
+            "name",
+            "sources"
+        ],
+        "kwargs_change": {
+            "name": ""
+        }
+    },
     "torch.utils.cpp_extension.CUDA_HOME": {
         "Matcher": "GenericMatcher",
         "paddle_api": "paddle.utils.cpp_extension.cpp_extension.CUDA_HOME"
     },
+    "torch.utils.cpp_extension.CppExtension": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.utils.cpp_extension.CppExtension",
+        "args_list": [
+            "name",
+            "sources"
+        ],
+        "kwargs_change": {
+            "name": ""
+        }
+    },
     "torch.utils.data.BatchSampler": {
         "Matcher": "TorchUtilDataBatchSampler",
         "args_list": [
@@ -9203,6 +9238,13 @@
             "data_source"
         ]
     },
+    "torch.utils.data.SequentialSampler": {
+        "Matcher": "GenericMatcher",
+        "paddle_api": "paddle.io.SequenceSampler",
+        "args_list": [
+            "data_source"
+        ]
+    },
     "torch.utils.data.default_collate": {
         "Matcher": "GenericMatcher",
         "paddle_api": "paddle.io.dataloader.collate.default_collate_fn",
diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index fe390c319..50382d3d1 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -3572,6 +3572,13 @@ def generate_code(self, kwargs):
         return GenericMatcher.generate_code(self, kwargs)
 
 
+class Attribute2Func(BaseMatcher):
+    def get_paddle_class_attribute_nodes(self, node):
+        self.parse_func(node)
+        code = "{}()".format(self.paddle_api)
+        return ast.parse(code).body[0].value
+
+
 class LuMatcher(BaseMatcher):
     def generate_code(self, kwargs):
         out_v = kwargs.pop("out") if "out" in kwargs else None
diff --git a/paconvert/attribute_mapping.json b/paconvert/attribute_mapping.json
index 0a24f36a8..88f4d8ef3 100644
--- a/paconvert/attribute_mapping.json
+++ b/paconvert/attribute_mapping.json
@@ -22,7 +22,10 @@
     },
     "torch.Tensor.is_meta": {},
     "torch.Tensor.is_quantized": {},
-    "torch.Tensor.is_sparse": {},
+    "torch.Tensor.is_sparse": {
+        "Matcher": "Attribute2Func",
+        "paddle_api": "paddle.Tensor.is_sparse"
+    },
     "torch.Tensor.mH": {},
     "torch.Tensor.mT": {},
     "torch.Tensor.names": {},
diff --git a/tests/test_Tensor_is_sparse.py b/tests/test_Tensor_is_sparse.py
new file mode 100644
index 000000000..6a6ca780c
--- /dev/null
+++ b/tests/test_Tensor_is_sparse.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("torch.Tensor.is_sparse")
+
+
+def test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        a = torch.tensor([[ 0.9254, -0.6213]])
+        result = a.is_sparse
+        """
+    )
+    obj.run(pytorch_code, ["result"])
diff --git a/tests/test_nn_Upsample.py b/tests/test_nn_Upsample.py
new file mode 100644
index 000000000..b136c6bff
--- /dev/null
+++ b/tests/test_nn_Upsample.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("torch.nn.Upsample")
+
+
+def test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(scale_factor=2, mode='nearest')
+        result = m(input)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_2():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(scale_factor=2, mode='bilinear')
+        result = m(input)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_3():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+        result = m(input)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_4():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(size=(2,2))
+        result = m(input)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_5():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
+        result = m(input)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_6():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        input = torch.tensor([[[[ 1.1524, 0.4714, 0.2857],
+                [-1.2533, -0.9829, -1.0981],
+                [ 0.1507, -1.1431, -2.0361]],
+
+                [[ 0.1024, -0.4482, 0.4137],
+                [ 0.9385, 0.4565, 0.7702],
+                [ 0.4135, -0.2587, 0.0482]]]])
+        m = torch.nn.Upsample(scale_factor=2, mode='bilinear', recompute_scale_factor=True)
+        result = m(input)
+        """
+    )
+    obj.run(
+        pytorch_code, unsupport=True, reason="paddle does not support recompute_scale_factor"
+    )
diff --git a/tests/test_utils_cpp_extension_CUDAExtension.py b/tests/test_utils_cpp_extension_CUDAExtension.py
new file mode 100644
index 000000000..753042cc5
--- /dev/null
+++ b/tests/test_utils_cpp_extension_CUDAExtension.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("torch.utils.cpp_extension.CUDAExtension")
+
+
+# The cuda compile is not supported
+def test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        from torch.utils.cpp_extension import CUDAExtension
+
+        CUDAExtension(
+            name='cuda_extension',
+            sources=['extension.cpp', 'extension_kernel.cu'],
+            extra_compile_args={'cxx': ['-g'],
+                                'nvcc': ['-O2']})
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
diff --git a/tests/test_utils_cpp_extension_CppExtension.py b/tests/test_utils_cpp_extension_CppExtension.py
new file mode 100644
index 000000000..ebe613ee2
--- /dev/null
+++ b/tests/test_utils_cpp_extension_CppExtension.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("torch.utils.cpp_extension.CppExtension")
+
+
+# The cpp compile is not supported
+def test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        from torch.utils.cpp_extension import CppExtension
+
+        CppExtension(
+            name='cuda_extension',
+            sources=['extension.cpp'],
+            extra_compile_args=['-g'])
+        result = True
+        """
+    )
+    obj.run(pytorch_code, ["result"])
diff --git a/tests/test_utils_data_SequentialSampler.py b/tests/test_utils_data_SequentialSampler.py
new file mode 100644
index 000000000..2da3b3fa3
--- /dev/null
+++ b/tests/test_utils_data_SequentialSampler.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import textwrap
+
+from apibase import APIBase
+
+obj = APIBase("torch.utils.data.SequentialSampler")
+
+
+def test_case_1():
+    pytorch_code = textwrap.dedent(
+        """
+        from torch.utils.data import SequentialSampler
+        from torch.utils.data import Dataset
+        import numpy as np
+
+        class Data(Dataset):
+            def __init__(self):
+                self.x = np.arange(0,100,1)
+
+            def __getitem__(self, idx):
+                return self.x[idx]
+
+            def __len__(self):
+                return self.x.shape[0]
+
+        class MySampler(SequentialSampler):
+            def __init__(self, data_source):
+                self.data_source = data_source
+
+            def __iter__(self):
+                return iter(range(len(self.data_source)))
+
+            def __len__(self):
+                return len(self.data_source)
+
+        data = Data()
+        s = MySampler(data)
+        result = []
+        for d in s:
+            result.append(d)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_2():
+    pytorch_code = textwrap.dedent(
+        """
+        from torch.utils.data import SequentialSampler
+        from torch.utils.data import Dataset
+        import numpy as np
+
+        class Data(Dataset):
+            def __init__(self):
+                self.x = np.arange(0,100,1)
+
+            def __getitem__(self, idx):
+                return self.x[idx]
+
+            def __len__(self):
+                return self.x.shape[0]
+
+        class MySampler(SequentialSampler):
+            def __init__(self, data):
+                self.data_source = data
+
+            def __iter__(self):
+                return iter(range(len(self.data_source)))
+
+            def __len__(self):
+                return len(self.data_source)
+
+        data = Data()
+        s = MySampler(data=data)
+        result = []
+        for d in s:
+            result.append(d)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+def test_case_3():
+    pytorch_code = textwrap.dedent(
+        """
+        from torch.utils.data import SequentialSampler
+        from torch.utils.data import Dataset
+        import numpy as np
+        import torch
+
+        class Data(Dataset):
+            def __init__(self):
+                self.x = np.arange(0,100,1).reshape(10, 10)
+                self.y = np.arange(0, 10, 1)
+
+            def __getitem__(self, idx):
+                return self.x[idx], self.y[idx]
+
+            def __len__(self):
+                return self.x.shape[0]
+
+        class MySampler(SequentialSampler):
+            def __init__(self, data):
+                self.data_source = data
+
+            def __iter__(self):
+                return iter(range(1, len(self.data_source)+1))
+
+            def __len__(self):
+                return len(self.data_source)
+
+        data = Data()
+        s = MySampler(data)
+        result = []
+        for idx in s:
+            result.append(idx)
+        result = torch.tensor(result)
+        """
+    )
+    obj.run(pytorch_code, ["result"])
+
+
+test_case_1()
+test_case_2()
+test_case_3()
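
A note on the torch.nn.Upsample mapping above: paddle.nn.Upsample takes the same keyword names (size, scale_factor, mode, align_corners), which is why GenericMatcher can map the call one-to-one, while recompute_scale_factor is listed under unsupport_args and drives the unsupported path exercised by test_case_6. A minimal sketch of the converted call, assuming a standard Paddle install; the input values are illustrative only:

    import paddle

    x = paddle.rand([1, 2, 3, 3])

    # torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    # is expected to become the call below, with identical keyword names.
    m = paddle.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True)
    y = m(x)
    print(y.shape)  # [1, 2, 6, 6]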
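For the cpp_extension mappings, the kwargs_change entry {"name": ""} means the converter simply drops the name argument; as far as I can tell, Paddle's CppExtension/CUDAExtension take sources plus the usual setuptools keyword arguments and do not accept a name. A hedged before/after sketch (the extension name and source paths are the placeholders from the test above, not real files):

    # PyTorch input (as in the test above):
    #   CppExtension(name='cuda_extension', sources=['extension.cpp'], extra_compile_args=['-g'])
    #
    # Expected Paddle output after conversion -- the name kwarg is removed:
    from paddle.utils.cpp_extension import CppExtension

    ext = CppExtension(sources=["extension.cpp"], extra_compile_args=["-g"])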
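The sampler mapping renames the class: torch.utils.data.SequentialSampler becomes paddle.io.SequenceSampler, with the same single data_source argument. A small usage sketch mirroring the test fixtures (the toy dataset below is hypothetical and only for illustration):

    import numpy as np
    from paddle.io import Dataset, SequenceSampler


    class ToyData(Dataset):
        # hypothetical dataset, shaped like the Data class used in the tests
        def __init__(self):
            self.x = np.arange(0, 10, 1)

        def __getitem__(self, idx):
            return self.x[idx]

        def __len__(self):
            return self.x.shape[0]


    sampler = SequenceSampler(data_source=ToyData())
    print(list(sampler))  # indices 0..9, in order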
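Finally, the new Attribute2Func matcher rewrites an attribute access into a call on the configured paddle_api (code = "{}()".format(self.paddle_api)). For the torch.Tensor.is_sparse entry, that means the attribute read becomes a method call on the Paddle side; a minimal sketch, assuming paddle.Tensor.is_sparse() is available in the target Paddle version:

    import paddle

    a = paddle.to_tensor([[0.9254, -0.6213]])

    # PyTorch source:         result = a.is_sparse    (attribute access)
    # Converted Paddle code:  result = a.is_sparse()  (call form produced by Attribute2Func)
    result = a.is_sparse()
    print(result)  # expected to be False for a dense tensor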